Diffstat (limited to 'lib/Analysis')
-rw-r--r-- | lib/Analysis/BasicAliasAnalysis.cpp   |  56
-rw-r--r-- | lib/Analysis/CMakeLists.txt           |   2
-rw-r--r-- | lib/Analysis/ConstantFolding.cpp      | 159
-rw-r--r-- | lib/Analysis/DebugInfo.cpp            | 191
-rw-r--r-- | lib/Analysis/IPA/Andersens.cpp        |   6
-rw-r--r-- | lib/Analysis/IVUsers.cpp              |  14
-rw-r--r-- | lib/Analysis/InstructionSimplify.cpp  | 348
-rw-r--r-- | lib/Analysis/LazyValueInfo.cpp        | 582
-rw-r--r-- | lib/Analysis/LiveValues.cpp           |   4
-rw-r--r-- | lib/Analysis/LoopInfo.cpp             |  21
-rw-r--r-- | lib/Analysis/MemoryBuiltins.cpp       | 114
-rw-r--r-- | lib/Analysis/PointerTracking.cpp      |   3
-rw-r--r-- | lib/Analysis/ScalarEvolution.cpp      |  29
-rw-r--r-- | lib/Analysis/ValueTracking.cpp        | 112
14 files changed, 1260 insertions(+), 381 deletions(-)
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index c81190b..b8d69f4 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -23,7 +23,6 @@ #include "llvm/GlobalVariable.h" #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" #include "llvm/Operator.h" #include "llvm/Pass.h" #include "llvm/Target/TargetData.h" @@ -99,7 +98,7 @@ static bool isNonEscapingLocalObject(const Value *V) { /// isObjectSmallerThan - Return true if we can prove that the object specified /// by V is smaller than Size. static bool isObjectSmallerThan(const Value *V, unsigned Size, - LLVMContext &Context, const TargetData &TD) { + const TargetData &TD) { const Type *AccessTy; if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { AccessTy = GV->getType()->getElementType(); @@ -109,7 +108,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size, else return false; } else if (const CallInst* CI = extractMallocCall(V)) { - if (!isArrayMalloc(V, Context, &TD)) + if (!isArrayMalloc(V, &TD)) // The size is the argument to the malloc call. if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getOperand(1))) return (C->getZExtValue() < Size); @@ -647,11 +646,25 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size, const Value *O1 = V1->getUnderlyingObject(); const Value *O2 = V2->getUnderlyingObject(); + // Null values in the default address space don't point to any object, so they + // don't alias any other pointer. + if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1)) + if (CPN->getType()->getAddressSpace() == 0) + return NoAlias; + if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2)) + if (CPN->getType()->getAddressSpace() == 0) + return NoAlias; + if (O1 != O2) { // If V1/V2 point to two different objects we know that we have no alias. if (isIdentifiedObject(O1) && isIdentifiedObject(O2)) return NoAlias; - + + // Constant pointers can't alias with non-const isIdentifiedObject objects. + if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) || + (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1))) + return NoAlias; + // Arguments can't alias with local allocations or noalias calls. if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) || (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))) @@ -665,10 +678,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size, // If the size of one access is larger than the entire object on the other // side, then we know such behavior is undefined and can assume no alias. - LLVMContext &Context = V1->getContext(); if (TD) - if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, Context, *TD)) || - (V2Size != ~0U && isObjectSmallerThan(O1, V2Size, Context, *TD))) + if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, *TD)) || + (V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD))) return NoAlias; // If one pointer is the result of a call/invoke and the other is a @@ -707,16 +719,16 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size, // This function is used to determine if the indices of two GEP instructions are // equal. V1 and V2 are the indices. 
-static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext &Context) { +static bool IndexOperandsEqual(Value *V1, Value *V2) { if (V1->getType() == V2->getType()) return V1 == V2; if (Constant *C1 = dyn_cast<Constant>(V1)) if (Constant *C2 = dyn_cast<Constant>(V2)) { // Sign extend the constants to long types, if necessary - if (C1->getType() != Type::getInt64Ty(Context)) - C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(Context)); - if (C2->getType() != Type::getInt64Ty(Context)) - C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(Context)); + if (C1->getType() != Type::getInt64Ty(C1->getContext())) + C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(C1->getContext())); + if (C2->getType() != Type::getInt64Ty(C1->getContext())) + C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(C1->getContext())); return C1 == C2; } return false; @@ -737,8 +749,6 @@ BasicAliasAnalysis::CheckGEPInstructions( const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty); - LLVMContext &Context = GEPPointerTy->getContext(); - // Find the (possibly empty) initial sequence of equal values... which are not // necessarily constants. unsigned NumGEP1Operands = NumGEP1Ops, NumGEP2Operands = NumGEP2Ops; @@ -746,8 +756,7 @@ BasicAliasAnalysis::CheckGEPInstructions( unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands); unsigned UnequalOper = 0; while (UnequalOper != MinOperands && - IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper], - Context)) { + IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) { // Advance through the type as we go... ++UnequalOper; if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty)) @@ -811,10 +820,11 @@ BasicAliasAnalysis::CheckGEPInstructions( if (Constant *G2OC = dyn_cast<ConstantInt>(const_cast<Value*>(G2Oper))){ if (G1OC->getType() != G2OC->getType()) { // Sign extend both operands to long. 
- if (G1OC->getType() != Type::getInt64Ty(Context)) - G1OC = ConstantExpr::getSExt(G1OC, Type::getInt64Ty(Context)); - if (G2OC->getType() != Type::getInt64Ty(Context)) - G2OC = ConstantExpr::getSExt(G2OC, Type::getInt64Ty(Context)); + const Type *Int64Ty = Type::getInt64Ty(G1OC->getContext()); + if (G1OC->getType() != Int64Ty) + G1OC = ConstantExpr::getSExt(G1OC, Int64Ty); + if (G2OC->getType() != Int64Ty) + G2OC = ConstantExpr::getSExt(G2OC, Int64Ty); GEP1Ops[FirstConstantOper] = G1OC; GEP2Ops[FirstConstantOper] = G2OC; } @@ -950,7 +960,7 @@ BasicAliasAnalysis::CheckGEPInstructions( for (unsigned i = 0; i != FirstConstantOper; ++i) { if (!isa<StructType>(ZeroIdxTy)) GEP1Ops[i] = GEP2Ops[i] = - Constant::getNullValue(Type::getInt32Ty(Context)); + Constant::getNullValue(Type::getInt32Ty(ZeroIdxTy->getContext())); if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy)) ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]); @@ -992,11 +1002,11 @@ BasicAliasAnalysis::CheckGEPInstructions( // if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty)) GEP1Ops[i] = - ConstantInt::get(Type::getInt64Ty(Context), + ConstantInt::get(Type::getInt64Ty(AT->getContext()), AT->getNumElements()-1); else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty)) GEP1Ops[i] = - ConstantInt::get(Type::getInt64Ty(Context), + ConstantInt::get(Type::getInt64Ty(VT->getContext()), VT->getNumElements()-1); } } diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt index f21fd54..0a83c3d 100644 --- a/lib/Analysis/CMakeLists.txt +++ b/lib/Analysis/CMakeLists.txt @@ -15,8 +15,10 @@ add_llvm_library(LLVMAnalysis IVUsers.cpp InlineCost.cpp InstCount.cpp + InstructionSimplify.cpp Interval.cpp IntervalPartition.cpp + LazyValueInfo.cpp LibCallAliasAnalysis.cpp LibCallSemantics.cpp LiveValues.cpp diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 33a5792..1cdadbf 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -23,7 +23,6 @@ #include "llvm/GlobalVariable.h" #include "llvm/Instructions.h" #include "llvm/Intrinsics.h" -#include "llvm/LLVMContext.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Target/TargetData.h" #include "llvm/ADT/SmallVector.h" @@ -493,8 +492,7 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){ /// these together. If target data info is available, it is provided as TD, /// otherwise TD is null. static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, - Constant *Op1, const TargetData *TD, - LLVMContext &Context){ + Constant *Op1, const TargetData *TD){ // SROA // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. @@ -521,15 +519,15 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP /// constant expression, do so. 
-static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps, +static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps, const Type *ResultTy, - LLVMContext &Context, const TargetData *TD) { Constant *Ptr = Ops[0]; if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized()) return 0; - unsigned BitWidth = TD->getTypeSizeInBits(TD->getIntPtrType(Context)); + unsigned BitWidth = + TD->getTypeSizeInBits(TD->getIntPtrType(Ptr->getContext())); APInt BasePtr(BitWidth, 0); bool BaseIsInt = true; if (!Ptr->isNullValue()) { @@ -558,7 +556,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps, // If the base value for this address is a literal integer value, fold the // getelementptr to the resulting integer value casted to the pointer type. if (BaseIsInt) { - Constant *C = ConstantInt::get(Context, Offset+BasePtr); + Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr); return ConstantExpr::getIntToPtr(C, ResultTy); } @@ -579,7 +577,8 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps, return 0; APInt NewIdx = Offset.udiv(ElemSize); Offset -= NewIdx * ElemSize; - NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Context), NewIdx)); + NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Ty->getContext()), + NewIdx)); Ty = ATy->getElementType(); } else if (const StructType *STy = dyn_cast<StructType>(Ty)) { // Determine which field of the struct the offset points into. The @@ -587,7 +586,8 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps, // know the offset is within the struct at this point. const StructLayout &SL = *TD->getStructLayout(STy); unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); - NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), ElIdx)); + NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), + ElIdx)); Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx)); Ty = STy->getTypeAtIndex(ElIdx); } else { @@ -628,8 +628,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps, /// is returned. Note that this function can only fail when attempting to fold /// instructions like loads and stores, which have no constant expression form. /// -Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context, - const TargetData *TD) { +Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) { if (PHINode *PN = dyn_cast<PHINode>(I)) { if (PN->getNumIncomingValues() == 0) return UndefValue::get(PN->getType()); @@ -656,33 +655,30 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context, return 0; // All operands not constant! if (const CmpInst *CI = dyn_cast<CmpInst>(I)) - return ConstantFoldCompareInstOperands(CI->getPredicate(), - Ops.data(), Ops.size(), - Context, TD); + return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], + TD); if (const LoadInst *LI = dyn_cast<LoadInst>(I)) return ConstantFoldLoadInst(LI, TD); return ConstantFoldInstOperands(I->getOpcode(), I->getType(), - Ops.data(), Ops.size(), Context, TD); + Ops.data(), Ops.size(), TD); } /// ConstantFoldConstantExpression - Attempt to fold the constant expression /// using the specified TargetData. If successful, the constant result is /// result is returned, if not, null is returned. 
Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE, - LLVMContext &Context, const TargetData *TD) { SmallVector<Constant*, 8> Ops; for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i) Ops.push_back(cast<Constant>(*i)); if (CE->isCompare()) - return ConstantFoldCompareInstOperands(CE->getPredicate(), - Ops.data(), Ops.size(), - Context, TD); + return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1], + TD); return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), - Ops.data(), Ops.size(), Context, TD); + Ops.data(), Ops.size(), TD); } /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the @@ -693,13 +689,11 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE, /// Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, Constant* const* Ops, unsigned NumOps, - LLVMContext &Context, const TargetData *TD) { // Handle easy binops first. if (Instruction::isBinaryOp(Opcode)) { if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) - if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD, - Context)) + if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD)) return C; return ConstantExpr::get(Opcode, Ops[0], Ops[1]); @@ -724,7 +718,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, unsigned InWidth = Input->getType()->getScalarSizeInBits(); if (TD->getPointerSizeInBits() < InWidth) { Constant *Mask = - ConstantInt::get(Context, APInt::getLowBitsSet(InWidth, + ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth, TD->getPointerSizeInBits())); Input = ConstantExpr::getAnd(Input, Mask); } @@ -766,7 +760,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, AT->getNumElements()))) { Constant *Index[] = { Constant::getNullValue(CE->getType()), - ConstantInt::get(Context, ElemIdx) + ConstantInt::get(ElTy->getContext(), ElemIdx) }; return ConstantExpr::getGetElementPtr(GV, &Index[0], 2); @@ -800,7 +794,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, case Instruction::ShuffleVector: return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]); case Instruction::GetElementPtr: - if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, Context, TD)) + if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD)) return C; return ConstantExpr::getGetElementPtr(Ops[0], Ops+1, NumOps-1); @@ -812,9 +806,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, /// returns a constant expression of the specified operands. /// Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, - Constant*const * Ops, - unsigned NumOps, - LLVMContext &Context, + Constant *Ops0, Constant *Ops1, const TargetData *TD) { // fold: icmp (inttoptr x), null -> icmp x, 0 // fold: icmp (ptrtoint x), 0 -> icmp x, null @@ -823,17 +815,16 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, // // ConstantExpr::getCompare cannot do this, because it doesn't have TD // around to know if bit truncation is happening. 
- if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops[0])) { - if (TD && Ops[1]->isNullValue()) { - const Type *IntPtrTy = TD->getIntPtrType(Context); + if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) { + if (TD && Ops1->isNullValue()) { + const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); if (CE0->getOpcode() == Instruction::IntToPtr) { // Convert the integer value to the right size to ensure we get the // proper extension or truncation. Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), IntPtrTy, false); - Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) }; - return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, - Context, TD); + Constant *Null = Constant::getNullValue(C->getType()); + return ConstantFoldCompareInstOperands(Predicate, C, Null, TD); } // Only do this transformation if the int is intptrty in size, otherwise @@ -841,16 +832,14 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, if (CE0->getOpcode() == Instruction::PtrToInt && CE0->getType() == IntPtrTy) { Constant *C = CE0->getOperand(0); - Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) }; - // FIXME! - return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, - Context, TD); + Constant *Null = Constant::getNullValue(C->getType()); + return ConstantFoldCompareInstOperands(Predicate, C, Null, TD); } } - if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops[1])) { + if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) { if (TD && CE0->getOpcode() == CE1->getOpcode()) { - const Type *IntPtrTy = TD->getIntPtrType(Context); + const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); if (CE0->getOpcode() == Instruction::IntToPtr) { // Convert the integer value to the right size to ensure we get the @@ -859,26 +848,21 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, IntPtrTy, false); Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0), IntPtrTy, false); - Constant *NewOps[] = { C0, C1 }; - return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, - Context, TD); + return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD); } // Only do this transformation if the int is intptrty in size, otherwise // there is a truncation or extension that we aren't modeling. 
if ((CE0->getOpcode() == Instruction::PtrToInt && CE0->getType() == IntPtrTy && - CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) { - Constant *NewOps[] = { - CE0->getOperand(0), CE1->getOperand(0) - }; - return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, - Context, TD); - } + CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) + return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), + CE1->getOperand(0), TD); } } } - return ConstantExpr::getCompare(Predicate, Ops[0], Ops[1]); + + return ConstantExpr::getCompare(Predicate, Ops0, Ops1); } @@ -996,7 +980,7 @@ llvm::canConstantFoldCallTo(const Function *F) { } static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, - const Type *Ty, LLVMContext &Context) { + const Type *Ty) { errno = 0; V = NativeFP(V); if (errno != 0) { @@ -1005,17 +989,15 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, } if (Ty->isFloatTy()) - return ConstantFP::get(Context, APFloat((float)V)); + return ConstantFP::get(Ty->getContext(), APFloat((float)V)); if (Ty->isDoubleTy()) - return ConstantFP::get(Context, APFloat(V)); + return ConstantFP::get(Ty->getContext(), APFloat(V)); llvm_unreachable("Can only constant fold float/double"); return 0; // dummy return to suppress warning } static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), - double V, double W, - const Type *Ty, - LLVMContext &Context) { + double V, double W, const Type *Ty) { errno = 0; V = NativeFP(V, W); if (errno != 0) { @@ -1024,9 +1006,9 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), } if (Ty->isFloatTy()) - return ConstantFP::get(Context, APFloat((float)V)); + return ConstantFP::get(Ty->getContext(), APFloat((float)V)); if (Ty->isDoubleTy()) - return ConstantFP::get(Context, APFloat(V)); + return ConstantFP::get(Ty->getContext(), APFloat(V)); llvm_unreachable("Can only constant fold float/double"); return 0; // dummy return to suppress warning } @@ -1037,7 +1019,6 @@ Constant * llvm::ConstantFoldCall(Function *F, Constant *const *Operands, unsigned NumOperands) { if (!F->hasName()) return 0; - LLVMContext &Context = F->getContext(); StringRef Name = F->getName(); const Type *Ty = F->getReturnType(); @@ -1054,62 +1035,62 @@ llvm::ConstantFoldCall(Function *F, switch (Name[0]) { case 'a': if (Name == "acos") - return ConstantFoldFP(acos, V, Ty, Context); + return ConstantFoldFP(acos, V, Ty); else if (Name == "asin") - return ConstantFoldFP(asin, V, Ty, Context); + return ConstantFoldFP(asin, V, Ty); else if (Name == "atan") - return ConstantFoldFP(atan, V, Ty, Context); + return ConstantFoldFP(atan, V, Ty); break; case 'c': if (Name == "ceil") - return ConstantFoldFP(ceil, V, Ty, Context); + return ConstantFoldFP(ceil, V, Ty); else if (Name == "cos") - return ConstantFoldFP(cos, V, Ty, Context); + return ConstantFoldFP(cos, V, Ty); else if (Name == "cosh") - return ConstantFoldFP(cosh, V, Ty, Context); + return ConstantFoldFP(cosh, V, Ty); else if (Name == "cosf") - return ConstantFoldFP(cos, V, Ty, Context); + return ConstantFoldFP(cos, V, Ty); break; case 'e': if (Name == "exp") - return ConstantFoldFP(exp, V, Ty, Context); + return ConstantFoldFP(exp, V, Ty); break; case 'f': if (Name == "fabs") - return ConstantFoldFP(fabs, V, Ty, Context); + return ConstantFoldFP(fabs, V, Ty); else if (Name == "floor") - return ConstantFoldFP(floor, V, Ty, Context); + return ConstantFoldFP(floor, V, Ty); break; case 'l': if (Name == "log" && V > 0) - return ConstantFoldFP(log, 
V, Ty, Context); + return ConstantFoldFP(log, V, Ty); else if (Name == "log10" && V > 0) - return ConstantFoldFP(log10, V, Ty, Context); + return ConstantFoldFP(log10, V, Ty); else if (Name == "llvm.sqrt.f32" || Name == "llvm.sqrt.f64") { if (V >= -0.0) - return ConstantFoldFP(sqrt, V, Ty, Context); + return ConstantFoldFP(sqrt, V, Ty); else // Undefined return Constant::getNullValue(Ty); } break; case 's': if (Name == "sin") - return ConstantFoldFP(sin, V, Ty, Context); + return ConstantFoldFP(sin, V, Ty); else if (Name == "sinh") - return ConstantFoldFP(sinh, V, Ty, Context); + return ConstantFoldFP(sinh, V, Ty); else if (Name == "sqrt" && V >= 0) - return ConstantFoldFP(sqrt, V, Ty, Context); + return ConstantFoldFP(sqrt, V, Ty); else if (Name == "sqrtf" && V >= 0) - return ConstantFoldFP(sqrt, V, Ty, Context); + return ConstantFoldFP(sqrt, V, Ty); else if (Name == "sinf") - return ConstantFoldFP(sin, V, Ty, Context); + return ConstantFoldFP(sin, V, Ty); break; case 't': if (Name == "tan") - return ConstantFoldFP(tan, V, Ty, Context); + return ConstantFoldFP(tan, V, Ty); else if (Name == "tanh") - return ConstantFoldFP(tanh, V, Ty, Context); + return ConstantFoldFP(tanh, V, Ty); break; default: break; @@ -1120,7 +1101,7 @@ llvm::ConstantFoldCall(Function *F, if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) { if (Name.startswith("llvm.bswap")) - return ConstantInt::get(Context, Op->getValue().byteSwap()); + return ConstantInt::get(F->getContext(), Op->getValue().byteSwap()); else if (Name.startswith("llvm.ctpop")) return ConstantInt::get(Ty, Op->getValue().countPopulation()); else if (Name.startswith("llvm.cttz")) @@ -1149,18 +1130,20 @@ llvm::ConstantFoldCall(Function *F, Op2->getValueAPF().convertToDouble(); if (Name == "pow") - return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty, Context); + return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); if (Name == "fmod") - return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty, Context); + return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty); if (Name == "atan2") - return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty, Context); + return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) { if (Name == "llvm.powi.f32") - return ConstantFP::get(Context, APFloat((float)std::pow((float)Op1V, + return ConstantFP::get(F->getContext(), + APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue()))); if (Name == "llvm.powi.f64") - return ConstantFP::get(Context, APFloat((double)std::pow((double)Op1V, - (int)Op2C->getZExtValue()))); + return ConstantFP::get(F->getContext(), + APFloat((double)std::pow((double)Op1V, + (int)Op2C->getZExtValue()))); } return 0; } diff --git a/lib/Analysis/DebugInfo.cpp b/lib/Analysis/DebugInfo.cpp index b64dbf4..8f62245 100644 --- a/lib/Analysis/DebugInfo.cpp +++ b/lib/Analysis/DebugInfo.cpp @@ -366,6 +366,9 @@ bool DIGlobalVariable::Verify() const { if (isNull()) return false; + if (!getDisplayName()) + return false; + if (getContext().isNull()) return false; @@ -406,6 +409,10 @@ uint64_t DIDerivedType::getOriginalTypeSize() const { Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type || Tag == dwarf::DW_TAG_restrict_type) { DIType BaseType = getTypeDerivedFrom(); + // If this type is not derived from any type then take conservative + // approach. 
+ if (BaseType.isNull()) + return getSizeInBits(); if (BaseType.isDerivedType()) return DIDerivedType(BaseType.getNode()).getOriginalTypeSize(); else @@ -599,9 +606,7 @@ void DIVariable::dump() const { //===----------------------------------------------------------------------===// DIFactory::DIFactory(Module &m) - : M(m), VMContext(M.getContext()), StopPointFn(0), FuncStartFn(0), - RegionStartFn(0), RegionEndFn(0), - DeclareFn(0) { + : M(m), VMContext(M.getContext()), DeclareFn(0) { EmptyStructPtr = PointerType::getUnqual(StructType::get(VMContext)); } @@ -646,9 +651,9 @@ DISubrange DIFactory::GetOrCreateSubrange(int64_t Lo, int64_t Hi) { /// CreateCompileUnit - Create a new descriptor for the specified compile /// unit. Note that this does not unique compile units within the module. DICompileUnit DIFactory::CreateCompileUnit(unsigned LangID, - StringRef Filename, - StringRef Directory, - StringRef Producer, + const char * Filename, + const char * Directory, + const char * Producer, bool isMain, bool isOptimized, const char *Flags, @@ -670,7 +675,7 @@ DICompileUnit DIFactory::CreateCompileUnit(unsigned LangID, } /// CreateEnumerator - Create a single enumerator value. -DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val){ +DIEnumerator DIFactory::CreateEnumerator(const char * Name, uint64_t Val){ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_enumerator), MDString::get(VMContext, Name), @@ -682,7 +687,7 @@ DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val){ /// CreateBasicType - Create a basic type like int, float, etc. DIBasicType DIFactory::CreateBasicType(DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNumber, uint64_t SizeInBits, @@ -707,7 +712,7 @@ DIBasicType DIFactory::CreateBasicType(DIDescriptor Context, /// CreateBasicType - Create a basic type like int, float, etc. DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNumber, Constant *SizeInBits, @@ -734,7 +739,7 @@ DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context, /// pointer, typedef, etc. DIDerivedType DIFactory::CreateDerivedType(unsigned Tag, DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNumber, uint64_t SizeInBits, @@ -762,7 +767,7 @@ DIDerivedType DIFactory::CreateDerivedType(unsigned Tag, /// pointer, typedef, etc. DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag, DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNumber, Constant *SizeInBits, @@ -789,7 +794,7 @@ DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag, /// CreateCompositeType - Create a composite type like array, struct, etc. DICompositeType DIFactory::CreateCompositeType(unsigned Tag, DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNumber, uint64_t SizeInBits, @@ -821,7 +826,7 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag, /// CreateCompositeType - Create a composite type like array, struct, etc. DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag, DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNumber, Constant *SizeInBits, @@ -854,9 +859,9 @@ DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag, /// See comments in DISubprogram for descriptions of these fields. 
This /// method does not unique the generated descriptors. DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context, - StringRef Name, - StringRef DisplayName, - StringRef LinkageName, + const char * Name, + const char * DisplayName, + const char * LinkageName, DICompileUnit CompileUnit, unsigned LineNo, DIType Type, bool isLocalToUnit, @@ -880,9 +885,9 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context, /// CreateGlobalVariable - Create a new descriptor for the specified global. DIGlobalVariable -DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name, - StringRef DisplayName, - StringRef LinkageName, +DIFactory::CreateGlobalVariable(DIDescriptor Context, const char * Name, + const char * DisplayName, + const char * LinkageName, DICompileUnit CompileUnit, unsigned LineNo, DIType Type,bool isLocalToUnit, bool isDefinition, llvm::GlobalVariable *Val) { @@ -914,7 +919,7 @@ DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name, /// CreateVariable - Create a new descriptor for the specified variable. DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context, - StringRef Name, + const char * Name, DICompileUnit CompileUnit, unsigned LineNo, DIType Type) { Value *Elts[] = { @@ -976,60 +981,8 @@ DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo, // DIFactory: Routines for inserting code into a function //===----------------------------------------------------------------------===// -/// InsertStopPoint - Create a new llvm.dbg.stoppoint intrinsic invocation, -/// inserting it at the end of the specified basic block. -void DIFactory::InsertStopPoint(DICompileUnit CU, unsigned LineNo, - unsigned ColNo, BasicBlock *BB) { - - // Lazily construct llvm.dbg.stoppoint function. - if (!StopPointFn) - StopPointFn = llvm::Intrinsic::getDeclaration(&M, - llvm::Intrinsic::dbg_stoppoint); - - // Invoke llvm.dbg.stoppoint - Value *Args[] = { - ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo), - ConstantInt::get(llvm::Type::getInt32Ty(VMContext), ColNo), - CU.getNode() - }; - CallInst::Create(StopPointFn, Args, Args+3, "", BB); -} - -/// InsertSubprogramStart - Create a new llvm.dbg.func.start intrinsic to -/// mark the start of the specified subprogram. -void DIFactory::InsertSubprogramStart(DISubprogram SP, BasicBlock *BB) { - // Lazily construct llvm.dbg.func.start. - if (!FuncStartFn) - FuncStartFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_func_start); - - // Call llvm.dbg.func.start which also implicitly sets a stoppoint. - CallInst::Create(FuncStartFn, SP.getNode(), "", BB); -} - -/// InsertRegionStart - Insert a new llvm.dbg.region.start intrinsic call to -/// mark the start of a region for the specified scoping descriptor. -void DIFactory::InsertRegionStart(DIDescriptor D, BasicBlock *BB) { - // Lazily construct llvm.dbg.region.start function. - if (!RegionStartFn) - RegionStartFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_region_start); - - // Call llvm.dbg.func.start. - CallInst::Create(RegionStartFn, D.getNode(), "", BB); -} - -/// InsertRegionEnd - Insert a new llvm.dbg.region.end intrinsic call to -/// mark the end of a region for the specified scoping descriptor. -void DIFactory::InsertRegionEnd(DIDescriptor D, BasicBlock *BB) { - // Lazily construct llvm.dbg.region.end function. - if (!RegionEndFn) - RegionEndFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_region_end); - - // Call llvm.dbg.region.end. 
- CallInst::Create(RegionEndFn, D.getNode(), "", BB); -} - /// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call. -void DIFactory::InsertDeclare(Value *Storage, DIVariable D, +Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D, Instruction *InsertBefore) { // Cast the storage to a {}* for the call to llvm.dbg.declare. Storage = new BitCastInst(Storage, EmptyStructPtr, "", InsertBefore); @@ -1038,11 +991,11 @@ void DIFactory::InsertDeclare(Value *Storage, DIVariable D, DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare); Value *Args[] = { Storage, D.getNode() }; - CallInst::Create(DeclareFn, Args, Args+2, "", InsertBefore); + return CallInst::Create(DeclareFn, Args, Args+2, "", InsertBefore); } /// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call. -void DIFactory::InsertDeclare(Value *Storage, DIVariable D, +Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D, BasicBlock *InsertAtEnd) { // Cast the storage to a {}* for the call to llvm.dbg.declare. Storage = new BitCastInst(Storage, EmptyStructPtr, "", InsertAtEnd); @@ -1051,7 +1004,7 @@ void DIFactory::InsertDeclare(Value *Storage, DIVariable D, DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare); Value *Args[] = { Storage, D.getNode() }; - CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd); + return CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd); } @@ -1062,38 +1015,18 @@ void DIFactory::InsertDeclare(Value *Storage, DIVariable D, /// processModule - Process entire module and collect debug info. void DebugInfoFinder::processModule(Module &M) { -#ifdef ATTACH_DEBUG_INFO_TO_AN_INSN MetadataContext &TheMetadata = M.getContext().getMetadata(); unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); -#endif + for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) for (Function::iterator FI = (*I).begin(), FE = (*I).end(); FI != FE; ++FI) for (BasicBlock::iterator BI = (*FI).begin(), BE = (*FI).end(); BI != BE; ++BI) { - if (DbgStopPointInst *SPI = dyn_cast<DbgStopPointInst>(BI)) - processStopPoint(SPI); - else if (DbgFuncStartInst *FSI = dyn_cast<DbgFuncStartInst>(BI)) - processFuncStart(FSI); - else if (DbgRegionStartInst *DRS = dyn_cast<DbgRegionStartInst>(BI)) - processRegionStart(DRS); - else if (DbgRegionEndInst *DRE = dyn_cast<DbgRegionEndInst>(BI)) - processRegionEnd(DRE); - else if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI)) + if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI)) processDeclare(DDI); -#ifdef ATTACH_DEBUG_INFO_TO_AN_INSN - else if (MDDbgKind) { - if (MDNode *L = TheMetadata.getMD(MDDbgKind, BI)) { - DILocation Loc(L); - DIScope S(Loc.getScope().getNode()); - if (S.isCompileUnit()) - addCompileUnit(DICompileUnit(S.getNode())); - else if (S.isSubprogram()) - processSubprogram(DISubprogram(S.getNode())); - else if (S.isLexicalBlock()) - processLexicalBlock(DILexicalBlock(S.getNode())); - } - } -#endif + else if (MDDbgKind) + if (MDNode *L = TheMetadata.getMD(MDDbgKind, BI)) + processLocation(DILocation(L)); } NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv"); @@ -1109,6 +1042,20 @@ void DebugInfoFinder::processModule(Module &M) { } } +/// processLocation - Process DILocation. 
+void DebugInfoFinder::processLocation(DILocation Loc) { + if (Loc.isNull()) return; + DIScope S(Loc.getScope().getNode()); + if (S.isNull()) return; + if (S.isCompileUnit()) + addCompileUnit(DICompileUnit(S.getNode())); + else if (S.isSubprogram()) + processSubprogram(DISubprogram(S.getNode())); + else if (S.isLexicalBlock()) + processLexicalBlock(DILexicalBlock(S.getNode())); + processLocation(Loc.getOrigLocation()); +} + /// processType - Process DIType. void DebugInfoFinder::processType(DIType DT) { if (!addType(DT)) @@ -1156,30 +1103,6 @@ void DebugInfoFinder::processSubprogram(DISubprogram SP) { processType(SP.getType()); } -/// processStopPoint - Process DbgStopPointInst. -void DebugInfoFinder::processStopPoint(DbgStopPointInst *SPI) { - MDNode *Context = dyn_cast<MDNode>(SPI->getContext()); - addCompileUnit(DICompileUnit(Context)); -} - -/// processFuncStart - Process DbgFuncStartInst. -void DebugInfoFinder::processFuncStart(DbgFuncStartInst *FSI) { - MDNode *SP = dyn_cast<MDNode>(FSI->getSubprogram()); - processSubprogram(DISubprogram(SP)); -} - -/// processRegionStart - Process DbgRegionStart. -void DebugInfoFinder::processRegionStart(DbgRegionStartInst *DRS) { - MDNode *SP = dyn_cast<MDNode>(DRS->getContext()); - processSubprogram(DISubprogram(SP)); -} - -/// processRegionEnd - Process DbgRegionEnd. -void DebugInfoFinder::processRegionEnd(DbgRegionEndInst *DRE) { - MDNode *SP = dyn_cast<MDNode>(DRE->getContext()); - processSubprogram(DISubprogram(SP)); -} - /// processDeclare - Process DbgDeclareInst. void DebugInfoFinder::processDeclare(DbgDeclareInst *DDI) { DIVariable DV(cast<MDNode>(DDI->getVariable())); @@ -1475,22 +1398,4 @@ bool getLocationInfo(const Value *V, std::string &DisplayName, return DebugLoc::get(Id); } - - /// isInlinedFnStart - Return true if FSI is starting an inlined function. - bool isInlinedFnStart(DbgFuncStartInst &FSI, const Function *CurrentFn) { - DISubprogram Subprogram(cast<MDNode>(FSI.getSubprogram())); - if (Subprogram.describes(CurrentFn)) - return false; - - return true; - } - - /// isInlinedFnEnd - Return true if REI is ending an inlined function. - bool isInlinedFnEnd(DbgRegionEndInst &REI, const Function *CurrentFn) { - DISubprogram Subprogram(cast<MDNode>(REI.getContext())); - if (Subprogram.isNull() || Subprogram.describes(CurrentFn)) - return false; - - return true; - } } diff --git a/lib/Analysis/IPA/Andersens.cpp b/lib/Analysis/IPA/Andersens.cpp index 17f304c..40a8cd5 100644 --- a/lib/Analysis/IPA/Andersens.cpp +++ b/lib/Analysis/IPA/Andersens.cpp @@ -518,7 +518,7 @@ namespace { /// getObject - Return the node corresponding to the memory object for the /// specified global or allocation instruction. unsigned getObject(Value *V) const { - DenseMap<Value*, unsigned>::iterator I = ObjectNodes.find(V); + DenseMap<Value*, unsigned>::const_iterator I = ObjectNodes.find(V); assert(I != ObjectNodes.end() && "Value does not have an object in the points-to graph!"); return I->second; @@ -527,7 +527,7 @@ namespace { /// getReturnNode - Return the node representing the return value for the /// specified function. unsigned getReturnNode(Function *F) const { - DenseMap<Function*, unsigned>::iterator I = ReturnNodes.find(F); + DenseMap<Function*, unsigned>::const_iterator I = ReturnNodes.find(F); assert(I != ReturnNodes.end() && "Function does not return a value!"); return I->second; } @@ -535,7 +535,7 @@ namespace { /// getVarargNode - Return the node representing the variable arguments /// formal for the specified function. 
unsigned getVarargNode(Function *F) const { - DenseMap<Function*, unsigned>::iterator I = VarargNodes.find(F); + DenseMap<Function*, unsigned>::const_iterator I = VarargNodes.find(F); assert(I != VarargNodes.end() && "Function does not take var args!"); return I->second; } diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp index 543e017..cf52320 100644 --- a/lib/Analysis/IVUsers.cpp +++ b/lib/Analysis/IVUsers.cpp @@ -151,6 +151,8 @@ static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV, if (L->contains(User->getParent())) return false; BasicBlock *LatchBlock = L->getLoopLatch(); + if (!LatchBlock) + return false; // Ok, the user is outside of the loop. If it is dominated by the latch // block, use the post-inc value. @@ -265,6 +267,18 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { return true; } +void IVUsers::AddUser(const SCEV *Stride, const SCEV *Offset, + Instruction *User, Value *Operand) { + IVUsersOfOneStride *StrideUses = IVUsesByStride[Stride]; + if (!StrideUses) { // First occurrence of this stride? + StrideOrder.push_back(Stride); + StrideUses = new IVUsersOfOneStride(Stride); + IVUses.push_back(StrideUses); + IVUsesByStride[Stride] = StrideUses; + } + IVUsesByStride[Stride]->addUser(Offset, User, Operand); +} + IVUsers::IVUsers() : LoopPass(&ID) { } diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp new file mode 100644 index 0000000..f9953e3 --- /dev/null +++ b/lib/Analysis/InstructionSimplify.cpp @@ -0,0 +1,348 @@ +//===- InstructionSimplify.cpp - Fold instruction operands ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements routines for folding instructions into simpler forms +// that do not require creating new instructions. For example, this does +// constant folding, and can handle identities like (X&0)->0. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Analysis/InstructionSimplify.h" +#include "llvm/Analysis/ConstantFolding.h" +#include "llvm/Support/ValueHandle.h" +#include "llvm/Instructions.h" +#include "llvm/Support/PatternMatch.h" +using namespace llvm; +using namespace llvm::PatternMatch; + +/// SimplifyAndInst - Given operands for an And, see if we can +/// fold the result. If not, this returns null. +Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, + const TargetData *TD) { + if (Constant *CLHS = dyn_cast<Constant>(Op0)) { + if (Constant *CRHS = dyn_cast<Constant>(Op1)) { + Constant *Ops[] = { CLHS, CRHS }; + return ConstantFoldInstOperands(Instruction::And, CLHS->getType(), + Ops, 2, TD); + } + + // Canonicalize the constant to the RHS. 
+ std::swap(Op0, Op1); + } + + // X & undef -> 0 + if (isa<UndefValue>(Op1)) + return Constant::getNullValue(Op0->getType()); + + // X & X = X + if (Op0 == Op1) + return Op0; + + // X & <0,0> = <0,0> + if (isa<ConstantAggregateZero>(Op1)) + return Op1; + + // X & <-1,-1> = X + if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) + if (CP->isAllOnesValue()) + return Op0; + + if (ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1)) { + // X & 0 = 0 + if (Op1CI->isZero()) + return Op1CI; + // X & -1 = X + if (Op1CI->isAllOnesValue()) + return Op0; + } + + // A & ~A = ~A & A = 0 + Value *A, *B; + if ((match(Op0, m_Not(m_Value(A))) && A == Op1) || + (match(Op1, m_Not(m_Value(A))) && A == Op0)) + return Constant::getNullValue(Op0->getType()); + + // (A | ?) & A = A + if (match(Op0, m_Or(m_Value(A), m_Value(B))) && + (A == Op1 || B == Op1)) + return Op1; + + // A & (A | ?) = A + if (match(Op1, m_Or(m_Value(A), m_Value(B))) && + (A == Op0 || B == Op0)) + return Op0; + + return 0; +} + +/// SimplifyOrInst - Given operands for an Or, see if we can +/// fold the result. If not, this returns null. +Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, + const TargetData *TD) { + if (Constant *CLHS = dyn_cast<Constant>(Op0)) { + if (Constant *CRHS = dyn_cast<Constant>(Op1)) { + Constant *Ops[] = { CLHS, CRHS }; + return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(), + Ops, 2, TD); + } + + // Canonicalize the constant to the RHS. + std::swap(Op0, Op1); + } + + // X | undef -> -1 + if (isa<UndefValue>(Op1)) + return Constant::getAllOnesValue(Op0->getType()); + + // X | X = X + if (Op0 == Op1) + return Op0; + + // X | <0,0> = X + if (isa<ConstantAggregateZero>(Op1)) + return Op0; + + // X | <-1,-1> = <-1,-1> + if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) + if (CP->isAllOnesValue()) + return Op1; + + if (ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1)) { + // X | 0 = X + if (Op1CI->isZero()) + return Op0; + // X | -1 = -1 + if (Op1CI->isAllOnesValue()) + return Op1CI; + } + + // A | ~A = ~A | A = -1 + Value *A, *B; + if ((match(Op0, m_Not(m_Value(A))) && A == Op1) || + (match(Op1, m_Not(m_Value(A))) && A == Op0)) + return Constant::getAllOnesValue(Op0->getType()); + + // (A & ?) | A = A + if (match(Op0, m_And(m_Value(A), m_Value(B))) && + (A == Op1 || B == Op1)) + return Op1; + + // A | (A & ?) = A + if (match(Op1, m_And(m_Value(A), m_Value(B))) && + (A == Op0 || B == Op0)) + return Op0; + + return 0; +} + + + + +static const Type *GetCompareTy(Value *Op) { + return CmpInst::makeCmpResultType(Op->getType()); +} + + +/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can +/// fold the result. If not, this returns null. +Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, + const TargetData *TD) { + CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; + assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); + + if (Constant *CLHS = dyn_cast<Constant>(LHS)) { + if (Constant *CRHS = dyn_cast<Constant>(RHS)) + return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD); + + // If we have a constant, make sure it is on the RHS. + std::swap(LHS, RHS); + Pred = CmpInst::getSwappedPredicate(Pred); + } + + // ITy - This is the return type of the compare we're considering. 
+ const Type *ITy = GetCompareTy(LHS); + + // icmp X, X -> true/false + if (LHS == RHS) + return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); + + if (isa<UndefValue>(RHS)) // X icmp undef -> undef + return UndefValue::get(ITy); + + // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value + // addresses never equal each other! We already know that Op0 != Op1. + if ((isa<GlobalValue>(LHS) || isa<AllocaInst>(LHS) || + isa<ConstantPointerNull>(LHS)) && + (isa<GlobalValue>(RHS) || isa<AllocaInst>(RHS) || + isa<ConstantPointerNull>(RHS))) + return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred)); + + // See if we are doing a comparison with a constant. + if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { + // If we have an icmp le or icmp ge instruction, turn it into the + // appropriate icmp lt or icmp gt instruction. This allows us to rely on + // them being folded in the code below. + switch (Pred) { + default: break; + case ICmpInst::ICMP_ULE: + if (CI->isMaxValue(false)) // A <=u MAX -> TRUE + return ConstantInt::getTrue(CI->getContext()); + break; + case ICmpInst::ICMP_SLE: + if (CI->isMaxValue(true)) // A <=s MAX -> TRUE + return ConstantInt::getTrue(CI->getContext()); + break; + case ICmpInst::ICMP_UGE: + if (CI->isMinValue(false)) // A >=u MIN -> TRUE + return ConstantInt::getTrue(CI->getContext()); + break; + case ICmpInst::ICMP_SGE: + if (CI->isMinValue(true)) // A >=s MIN -> TRUE + return ConstantInt::getTrue(CI->getContext()); + break; + } + } + + + return 0; +} + +/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can +/// fold the result. If not, this returns null. +Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, + const TargetData *TD) { + CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; + assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); + + if (Constant *CLHS = dyn_cast<Constant>(LHS)) { + if (Constant *CRHS = dyn_cast<Constant>(RHS)) + return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD); + + // If we have a constant, make sure it is on the RHS. + std::swap(LHS, RHS); + Pred = CmpInst::getSwappedPredicate(Pred); + } + + // Fold trivial predicates. + if (Pred == FCmpInst::FCMP_FALSE) + return ConstantInt::get(GetCompareTy(LHS), 0); + if (Pred == FCmpInst::FCMP_TRUE) + return ConstantInt::get(GetCompareTy(LHS), 1); + + if (isa<UndefValue>(RHS)) // fcmp pred X, undef -> undef + return UndefValue::get(GetCompareTy(LHS)); + + // fcmp x,x -> true/false. Not all compares are foldable. + if (LHS == RHS) { + if (CmpInst::isTrueWhenEqual(Pred)) + return ConstantInt::get(GetCompareTy(LHS), 1); + if (CmpInst::isFalseWhenEqual(Pred)) + return ConstantInt::get(GetCompareTy(LHS), 0); + } + + // Handle fcmp with constant RHS + if (Constant *RHSC = dyn_cast<Constant>(RHS)) { + // If the constant is a nan, see if we can fold the comparison based on it. + if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) { + if (CFP->getValueAPF().isNaN()) { + if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo" + return ConstantInt::getFalse(CFP->getContext()); + assert(FCmpInst::isUnordered(Pred) && + "Comparison must be either ordered or unordered!"); + // True if unordered. + return ConstantInt::getTrue(CFP->getContext()); + } + } + } + + return 0; +} + +//=== Helper functions for higher up the class hierarchy. + +/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can +/// fold the result. If not, this returns null. 
+Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, + const TargetData *TD) { + switch (Opcode) { + case Instruction::And: return SimplifyAndInst(LHS, RHS, TD); + case Instruction::Or: return SimplifyOrInst(LHS, RHS, TD); + default: + if (Constant *CLHS = dyn_cast<Constant>(LHS)) + if (Constant *CRHS = dyn_cast<Constant>(RHS)) { + Constant *COps[] = {CLHS, CRHS}; + return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, 2, TD); + } + return 0; + } +} + +/// SimplifyCmpInst - Given operands for a CmpInst, see if we can +/// fold the result. +Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, + const TargetData *TD) { + if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) + return SimplifyICmpInst(Predicate, LHS, RHS, TD); + return SimplifyFCmpInst(Predicate, LHS, RHS, TD); +} + + +/// SimplifyInstruction - See if we can compute a simplified version of this +/// instruction. If not, this returns null. +Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD) { + switch (I->getOpcode()) { + default: + return ConstantFoldInstruction(I, TD); + case Instruction::And: + return SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD); + case Instruction::Or: + return SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD); + case Instruction::ICmp: + return SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), + I->getOperand(0), I->getOperand(1), TD); + case Instruction::FCmp: + return SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), + I->getOperand(0), I->getOperand(1), TD); + } +} + +/// ReplaceAndSimplifyAllUses - Perform From->replaceAllUsesWith(To) and then +/// delete the From instruction. In addition to a basic RAUW, this does a +/// recursive simplification of the newly formed instructions. This catches +/// things where one simplification exposes other opportunities. This only +/// simplifies and deletes scalar operations, it does not change the CFG. +/// +void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To, + const TargetData *TD) { + assert(From != To && "ReplaceAndSimplifyAllUses(X,X) is not valid!"); + + // FromHandle - This keeps a weakvh on the from value so that we can know if + // it gets deleted out from under us in a recursive simplification. + WeakVH FromHandle(From); + + while (!From->use_empty()) { + // Update the instruction to use the new value. + Use &U = From->use_begin().getUse(); + Instruction *User = cast<Instruction>(U.getUser()); + U = To; + + // See if we can simplify it. + if (Value *V = SimplifyInstruction(User, TD)) { + // Recursively simplify this. + ReplaceAndSimplifyAllUses(User, V, TD); + + // If the recursive simplification ended up revisiting and deleting 'From' + // then we're done. + if (FromHandle == 0) + return; + } + } + From->eraseFromParent(); +} + diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp new file mode 100644 index 0000000..5796c6f --- /dev/null +++ b/lib/Analysis/LazyValueInfo.cpp @@ -0,0 +1,582 @@ +//===- LazyValueInfo.cpp - Value constraint analysis ----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interface for lazy computation of value constraint +// information. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "lazy-value-info" +#include "llvm/Analysis/LazyValueInfo.h" +#include "llvm/Constants.h" +#include "llvm/Instructions.h" +#include "llvm/Analysis/ConstantFolding.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/PointerIntPair.h" +#include "llvm/ADT/STLExtras.h" +using namespace llvm; + +char LazyValueInfo::ID = 0; +static RegisterPass<LazyValueInfo> +X("lazy-value-info", "Lazy Value Information Analysis", false, true); + +namespace llvm { + FunctionPass *createLazyValueInfoPass() { return new LazyValueInfo(); } +} + + +//===----------------------------------------------------------------------===// +// LVILatticeVal +//===----------------------------------------------------------------------===// + +/// LVILatticeVal - This is the information tracked by LazyValueInfo for each +/// value. +/// +/// FIXME: This is basically just for bringup, this can be made a lot more rich +/// in the future. +/// +namespace { +class LVILatticeVal { + enum LatticeValueTy { + /// undefined - This LLVM Value has no known value yet. + undefined, + /// constant - This LLVM Value has a specific constant value. + constant, + + /// notconstant - This LLVM value is known to not have the specified value. + notconstant, + + /// overdefined - This instruction is not known to be constant, and we know + /// it has a value. + overdefined + }; + + /// Val: This stores the current lattice value along with the Constant* for + /// the constant if this is a 'constant' or 'notconstant' value. + PointerIntPair<Constant *, 2, LatticeValueTy> Val; + +public: + LVILatticeVal() : Val(0, undefined) {} + + static LVILatticeVal get(Constant *C) { + LVILatticeVal Res; + Res.markConstant(C); + return Res; + } + static LVILatticeVal getNot(Constant *C) { + LVILatticeVal Res; + Res.markNotConstant(C); + return Res; + } + + bool isUndefined() const { return Val.getInt() == undefined; } + bool isConstant() const { return Val.getInt() == constant; } + bool isNotConstant() const { return Val.getInt() == notconstant; } + bool isOverdefined() const { return Val.getInt() == overdefined; } + + Constant *getConstant() const { + assert(isConstant() && "Cannot get the constant of a non-constant!"); + return Val.getPointer(); + } + + Constant *getNotConstant() const { + assert(isNotConstant() && "Cannot get the constant of a non-notconstant!"); + return Val.getPointer(); + } + + /// markOverdefined - Return true if this is a change in status. + bool markOverdefined() { + if (isOverdefined()) + return false; + Val.setInt(overdefined); + return true; + } + + /// markConstant - Return true if this is a change in status. + bool markConstant(Constant *V) { + if (isConstant()) { + assert(getConstant() == V && "Marking constant with different value"); + return false; + } + + assert(isUndefined()); + Val.setInt(constant); + assert(V && "Marking constant with NULL"); + Val.setPointer(V); + return true; + } + + /// markNotConstant - Return true if this is a change in status. 
+ bool markNotConstant(Constant *V) { + if (isNotConstant()) { + assert(getNotConstant() == V && "Marking !constant with different value"); + return false; + } + + if (isConstant()) + assert(getConstant() != V && "Marking not constant with different value"); + else + assert(isUndefined()); + + Val.setInt(notconstant); + assert(V && "Marking constant with NULL"); + Val.setPointer(V); + return true; + } + + /// mergeIn - Merge the specified lattice value into this one, updating this + /// one and returning true if anything changed. + bool mergeIn(const LVILatticeVal &RHS) { + if (RHS.isUndefined() || isOverdefined()) return false; + if (RHS.isOverdefined()) return markOverdefined(); + + if (RHS.isNotConstant()) { + if (isNotConstant()) { + if (getNotConstant() != RHS.getNotConstant() || + isa<ConstantExpr>(getNotConstant()) || + isa<ConstantExpr>(RHS.getNotConstant())) + return markOverdefined(); + return false; + } + if (isConstant()) { + if (getConstant() == RHS.getNotConstant() || + isa<ConstantExpr>(RHS.getNotConstant()) || + isa<ConstantExpr>(getConstant())) + return markOverdefined(); + return markNotConstant(RHS.getNotConstant()); + } + + assert(isUndefined() && "Unexpected lattice"); + return markNotConstant(RHS.getNotConstant()); + } + + // RHS must be a constant, we must be undef, constant, or notconstant. + if (isUndefined()) + return markConstant(RHS.getConstant()); + + if (isConstant()) { + if (getConstant() != RHS.getConstant()) + return markOverdefined(); + return false; + } + + // If we are known "!=4" and RHS is "==5", stay at "!=4". + if (getNotConstant() == RHS.getConstant() || + isa<ConstantExpr>(getNotConstant()) || + isa<ConstantExpr>(RHS.getConstant())) + return markOverdefined(); + return false; + } + +}; + +} // end anonymous namespace. + +namespace llvm { +raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) { + if (Val.isUndefined()) + return OS << "undefined"; + if (Val.isOverdefined()) + return OS << "overdefined"; + + if (Val.isNotConstant()) + return OS << "notconstant<" << *Val.getNotConstant() << '>'; + return OS << "constant<" << *Val.getConstant() << '>'; +} +} + +//===----------------------------------------------------------------------===// +// LazyValueInfoCache Decl +//===----------------------------------------------------------------------===// + +namespace { + /// LazyValueInfoCache - This is the cache kept by LazyValueInfo which + /// maintains information about queries across the clients' queries. + class LazyValueInfoCache { + public: + /// BlockCacheEntryTy - This is a computed lattice value at the end of the + /// specified basic block for a Value* that depends on context. + typedef std::pair<BasicBlock*, LVILatticeVal> BlockCacheEntryTy; + + /// ValueCacheEntryTy - This is all of the cached block information for + /// exactly one Value*. The entries are sorted by the BasicBlock* of the + /// entries, allowing us to do a lookup with a binary search. + typedef std::vector<BlockCacheEntryTy> ValueCacheEntryTy; + + private: + /// ValueCache - This is all of the cached information for all values, + /// mapped from Value* to key information. + DenseMap<Value*, ValueCacheEntryTy> ValueCache; + public: + + /// getValueInBlock - This is the query interface to determine the lattice + /// value for the specified Value* at the end of the specified block. 
+ LVILatticeVal getValueInBlock(Value *V, BasicBlock *BB); + + /// getValueOnEdge - This is the query interface to determine the lattice + /// value for the specified Value* that is true on the specified edge. + LVILatticeVal getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB); + }; +} // end anonymous namespace + +namespace { + struct BlockCacheEntryComparator { + static int Compare(const void *LHSv, const void *RHSv) { + const LazyValueInfoCache::BlockCacheEntryTy *LHS = + static_cast<const LazyValueInfoCache::BlockCacheEntryTy *>(LHSv); + const LazyValueInfoCache::BlockCacheEntryTy *RHS = + static_cast<const LazyValueInfoCache::BlockCacheEntryTy *>(RHSv); + if (LHS->first < RHS->first) + return -1; + if (LHS->first > RHS->first) + return 1; + return 0; + } + + bool operator()(const LazyValueInfoCache::BlockCacheEntryTy &LHS, + const LazyValueInfoCache::BlockCacheEntryTy &RHS) const { + return LHS.first < RHS.first; + } + }; +} + +//===----------------------------------------------------------------------===// +// LVIQuery Impl +//===----------------------------------------------------------------------===// + +namespace { + /// LVIQuery - This is a transient object that exists while a query is + /// being performed. + /// + /// TODO: Reuse LVIQuery instead of recreating it for every query; this avoids + /// reallocating the DenseMap on every query. + class LVIQuery { + typedef LazyValueInfoCache::BlockCacheEntryTy BlockCacheEntryTy; + typedef LazyValueInfoCache::ValueCacheEntryTy ValueCacheEntryTy; + + /// This is the current value being queried for. + Value *Val; + + /// This is all of the cached information about this value. + ValueCacheEntryTy &Cache; + + /// NewBlockInfo - This is a mapping of the new BasicBlocks which have been + /// added to the cache but are not yet in sorted order. + DenseMap<BasicBlock*, LVILatticeVal> NewBlockInfo; + public: + + LVIQuery(Value *V, ValueCacheEntryTy &VC) : Val(V), Cache(VC) { + } + + ~LVIQuery() { + // When the query is done, insert the newly discovered facts into the + // cache in sorted order. + if (NewBlockInfo.empty()) return; + + // Grow the cache to exactly fit the new data. + Cache.reserve(Cache.size() + NewBlockInfo.size()); + + // If we only have one new entry, insert it instead of doing a full-on + // sort. + if (NewBlockInfo.size() == 1) { + BlockCacheEntryTy Entry = *NewBlockInfo.begin(); + ValueCacheEntryTy::iterator I = + std::lower_bound(Cache.begin(), Cache.end(), Entry, + BlockCacheEntryComparator()); + assert((I == Cache.end() || I->first != Entry.first) && + "Entry already in map!"); + + Cache.insert(I, Entry); + return; + } + + // TODO: If we only have two new elements, INSERT them both. + + Cache.insert(Cache.end(), NewBlockInfo.begin(), NewBlockInfo.end()); + array_pod_sort(Cache.begin(), Cache.end(), + BlockCacheEntryComparator::Compare); + + } + + LVILatticeVal getBlockValue(BasicBlock *BB); + LVILatticeVal getEdgeValue(BasicBlock *FromBB, BasicBlock *ToBB); + + private: + LVILatticeVal &getCachedEntryForBlock(BasicBlock *BB); + }; +} // end anonymous namespace + +/// getCachedEntryForBlock - See if we already have a value for this block. If +/// so, return it; otherwise create a new entry in the NewBlockInfo map to use. +LVILatticeVal &LVIQuery::getCachedEntryForBlock(BasicBlock *BB) { + + // Do a binary search to see if we already have an entry for this block in + // the cache set. If so, find it.
+ if (!Cache.empty()) { + ValueCacheEntryTy::iterator Entry = + std::lower_bound(Cache.begin(), Cache.end(), + BlockCacheEntryTy(BB, LVILatticeVal()), + BlockCacheEntryComparator()); + if (Entry != Cache.end() && Entry->first == BB) + return Entry->second; + } + + // Otherwise, check to see if it's in NewBlockInfo or create a new entry if + // not. + return NewBlockInfo[BB]; +} + +LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) { + // See if we already have a value for this block. + LVILatticeVal &BBLV = getCachedEntryForBlock(BB); + + // If we've already computed this block's value, return it. + if (!BBLV.isUndefined()) { + DEBUG(errs() << " reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n'); + return BBLV; + } + + // Otherwise, this is the first time we're seeing this block. Reset the + // lattice value to overdefined, so that cycles will terminate and be + // conservatively correct. + BBLV.markOverdefined(); + + // If V is live into BB, see if our predecessors know anything about it. + Instruction *BBI = dyn_cast<Instruction>(Val); + if (BBI == 0 || BBI->getParent() != BB) { + LVILatticeVal Result; // Start Undefined. + unsigned NumPreds = 0; + + // Loop over all of our predecessors, merging what we know from them into + // result. + for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { + Result.mergeIn(getEdgeValue(*PI, BB)); + + // If we hit overdefined, exit early. The cache entry for this block is + // already set to overdefined. + if (Result.isOverdefined()) { + DEBUG(errs() << " compute BB '" << BB->getName() + << "' - overdefined because of pred.\n"); + return Result; + } + ++NumPreds; + } + + // If this is the entry block, we must be asking about an argument. The + // value is overdefined. + if (NumPreds == 0 && BB == &BB->getParent()->front()) { + assert(isa<Argument>(Val) && "Unknown live-in to the entry block"); + Result.markOverdefined(); + return Result; + } + + // Return the merged value, which is more precise than 'overdefined'. + assert(!Result.isOverdefined()); + return getCachedEntryForBlock(BB) = Result; + } + + // If this value is defined by an instruction in this block, we have to + // process it here somehow or return overdefined. + if (PHINode *PN = dyn_cast<PHINode>(BBI)) { + (void)PN; + // TODO: PHI Translation in preds. + } else { + + } + + DEBUG(errs() << " compute BB '" << BB->getName() + << "' - overdefined because inst def found.\n"); + + LVILatticeVal Result; + Result.markOverdefined(); + return getCachedEntryForBlock(BB) = Result; +} + + +/// getEdgeValue - This method attempts to infer the lattice value of Val on +/// the edge from BBFrom to BBTo by inspecting the terminator of BBFrom +/// (conditional branches and switches), falling back to the value at the end +/// of BBFrom when nothing can be inferred from the edge itself. +LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) { + // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we + // know that v != 0. + if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) { + // If this is a conditional branch and only one successor goes to BBTo, then + // we may be able to infer something from the condition. + if (BI->isConditional() && + BI->getSuccessor(0) != BI->getSuccessor(1)) { + bool isTrueDest = BI->getSuccessor(0) == BBTo; + assert(BI->getSuccessor(!isTrueDest) == BBTo && + "BBTo isn't a successor of BBFrom"); + + // If V is the condition of the branch itself, then we know exactly what + // it is. + if (BI->getCondition() == Val) + return LVILatticeVal::get(ConstantInt::get( + Type::getInt1Ty(Val->getContext()), isTrueDest)); + + // If the condition of the branch is an equality comparison, we may be + // able to infer the value.
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) + if (ICI->isEquality() && ICI->getOperand(0) == Val && + isa<Constant>(ICI->getOperand(1))) { + // We know that V has the RHS constant if this is a true SETEQ or + // false SETNE. + if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ)) + return LVILatticeVal::get(cast<Constant>(ICI->getOperand(1))); + return LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1))); + } + } + } + + // If the edge was formed by a switch on the value, then we may know exactly + // what it is. + if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) { + // If BBTo is the default destination of the switch, we don't know anything. + // Given a more powerful range analysis we could know stuff. + if (SI->getCondition() == Val && SI->getDefaultDest() != BBTo) { + // We only know something if there is exactly one value that goes from + // BBFrom to BBTo. + unsigned NumEdges = 0; + ConstantInt *EdgeVal = 0; + for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) { + if (SI->getSuccessor(i) != BBTo) continue; + if (NumEdges++) break; + EdgeVal = SI->getCaseValue(i); + } + assert(EdgeVal && "Missing successor?"); + if (NumEdges == 1) + return LVILatticeVal::get(EdgeVal); + } + } + + // Otherwise see if the value is known in the block. + return getBlockValue(BBFrom); +} + + +//===----------------------------------------------------------------------===// +// LazyValueInfoCache Impl +//===----------------------------------------------------------------------===// + +LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB) { + // If already a constant, there is nothing to compute. + if (Constant *VC = dyn_cast<Constant>(V)) + return LVILatticeVal::get(VC); + + DEBUG(errs() << "LVI Getting block end value " << *V << " at '" + << BB->getName() << "'\n"); + + LVILatticeVal Result = LVIQuery(V, ValueCache[V]).getBlockValue(BB); + + DEBUG(errs() << " Result = " << Result << "\n"); + return Result; +} + +LVILatticeVal LazyValueInfoCache:: +getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB) { + // If already a constant, there is nothing to compute. + if (Constant *VC = dyn_cast<Constant>(V)) + return LVILatticeVal::get(VC); + + DEBUG(errs() << "LVI Getting edge value " << *V << " from '" + << FromBB->getName() << "' to '" << ToBB->getName() << "'\n"); + LVILatticeVal Result = + LVIQuery(V, ValueCache[V]).getEdgeValue(FromBB, ToBB); + + DEBUG(errs() << " Result = " << Result << "\n"); + + return Result; +} + +//===----------------------------------------------------------------------===// +// LazyValueInfo Impl +//===----------------------------------------------------------------------===// + +bool LazyValueInfo::runOnFunction(Function &F) { + TD = getAnalysisIfAvailable<TargetData>(); + // Fully lazy. + return false; +} + +/// getCache - This lazily constructs the LazyValueInfoCache. +static LazyValueInfoCache &getCache(void *&PImpl) { + if (!PImpl) + PImpl = new LazyValueInfoCache(); + return *static_cast<LazyValueInfoCache*>(PImpl); +} + +void LazyValueInfo::releaseMemory() { + // If the cache was allocated, free it. + if (PImpl) { + delete &getCache(PImpl); + PImpl = 0; + } +} + +Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB) { + LVILatticeVal Result = getCache(PImpl).getValueInBlock(V, BB); + + if (Result.isConstant()) + return Result.getConstant(); + return 0; +} + +/// getConstantOnEdge - Determine whether the specified value is known to be a +/// constant on the specified edge. 
Return null if not. +Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB, + BasicBlock *ToBB) { + LVILatticeVal Result = getCache(PImpl).getValueOnEdge(V, FromBB, ToBB); + + if (Result.isConstant()) + return Result.getConstant(); + return 0; +} + +/// getPredicateOnEdge - Determine whether the specified value comparison +/// with a constant is known to be true or false on the specified CFG edge. +/// Pred is a CmpInst predicate. +LazyValueInfo::Tristate +LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C, + BasicBlock *FromBB, BasicBlock *ToBB) { + LVILatticeVal Result = getCache(PImpl).getValueOnEdge(V, FromBB, ToBB); + + // If we know the value is a constant, evaluate the conditional. + Constant *Res = 0; + if (Result.isConstant()) { + Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD); + if (ConstantInt *ResCI = dyn_cast_or_null<ConstantInt>(Res)) + return ResCI->isZero() ? False : True; + return Unknown; + } + + if (Result.isNotConstant()) { + // If this is an equality comparison, we can try to fold it knowing that + // "V != C1". + if (Pred == ICmpInst::ICMP_EQ) { + // !C1 == C -> false iff C1 == C. + Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE, + Result.getNotConstant(), C, TD); + if (Res->isNullValue()) + return False; + } else if (Pred == ICmpInst::ICMP_NE) { + // !C1 != C -> true iff C1 == C. + Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE, + Result.getNotConstant(), C, TD); + if (Res->isNullValue()) + return True; + } + return Unknown; + } + + return Unknown; +} + + diff --git a/lib/Analysis/LiveValues.cpp b/lib/Analysis/LiveValues.cpp index 2bbe98a..02ec7d3 100644 --- a/lib/Analysis/LiveValues.cpp +++ b/lib/Analysis/LiveValues.cpp @@ -17,7 +17,9 @@ #include "llvm/Analysis/LoopInfo.h" using namespace llvm; -FunctionPass *llvm::createLiveValuesPass() { return new LiveValues(); } +namespace llvm { + FunctionPass *createLiveValuesPass() { return new LiveValues(); } +} char LiveValues::ID = 0; static RegisterPass<LiveValues> diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp index e9256b7..1c614b0 100644 --- a/lib/Analysis/LoopInfo.cpp +++ b/lib/Analysis/LoopInfo.cpp @@ -263,14 +263,13 @@ bool Loop::isLCSSAForm() const { SmallPtrSet<BasicBlock *, 16> LoopBBs(block_begin(), block_end()); for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) { - BasicBlock *BB = *BI; - for (BasicBlock ::iterator I = BB->begin(), E = BB->end(); I != E;++I) + BasicBlock *BB = *BI; + for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I) for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI) { BasicBlock *UserBB = cast<Instruction>(*UI)->getParent(); - if (PHINode *P = dyn_cast<PHINode>(*UI)) { + if (PHINode *P = dyn_cast<PHINode>(*UI)) UserBB = P->getIncomingBlock(UI); - } // Check the current block, as a fast-path. Most values are used in // the same block they are defined in. @@ -286,12 +285,14 @@ bool Loop::isLCSSAForm() const { /// the LoopSimplify form transforms loops to, which is sometimes called /// normal form. bool Loop::isLoopSimplifyForm() const { - // Normal-form loops have a preheader. - if (!getLoopPreheader()) - return false; - // Normal-form loops have a single backedge. - if (!getLoopLatch()) - return false; + // Normal-form loops have a preheader, a single backedge, and all of their + // exits have all their predecessors inside the loop. 
+ return getLoopPreheader() && getLoopLatch() && hasDedicatedExits(); +} + +/// hasDedicatedExits - Return true if no exit block for the loop +/// has a predecessor that is outside the loop. +bool Loop::hasDedicatedExits() const { // Sort the blocks vector so that we can use binary search to do quick // lookups. SmallPtrSet<BasicBlock *, 16> LoopBBs(block_begin(), block_end()); diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp index f4eb793..b448628 100644 --- a/lib/Analysis/MemoryBuiltins.cpp +++ b/lib/Analysis/MemoryBuiltins.cpp @@ -16,7 +16,7 @@ #include "llvm/Constants.h" #include "llvm/Instructions.h" #include "llvm/Module.h" -#include "llvm/Analysis/ConstantFolding.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/Target/TargetData.h" using namespace llvm; @@ -87,13 +87,8 @@ const CallInst *llvm::extractMallocCallFromBitCast(const Value *I) { : NULL; } -/// isConstantOne - Return true only if val is constant int 1. -static bool isConstantOne(Value *val) { - return isa<ConstantInt>(val) && cast<ConstantInt>(val)->isOne(); -} - -static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context, - const TargetData *TD) { +static Value *computeArraySize(const CallInst *CI, const TargetData *TD, + bool LookThroughSExt = false) { if (!CI) return NULL; @@ -102,99 +97,27 @@ static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context, if (!T || !T->isSized() || !TD) return NULL; - Value *MallocArg = CI->getOperand(1); - const Type *ArgType = MallocArg->getType(); - ConstantExpr *CO = dyn_cast<ConstantExpr>(MallocArg); - BinaryOperator *BO = dyn_cast<BinaryOperator>(MallocArg); - - unsigned ElementSizeInt = TD->getTypeAllocSize(T); + unsigned ElementSize = TD->getTypeAllocSize(T); if (const StructType *ST = dyn_cast<StructType>(T)) - ElementSizeInt = TD->getStructLayout(ST)->getSizeInBytes(); - Constant *ElementSize = ConstantInt::get(ArgType, ElementSizeInt); - - // First, check if CI is a non-array malloc. - if (CO && CO == ElementSize) - // Match CreateMalloc's use of constant 1 array-size for non-array mallocs. - return ConstantInt::get(ArgType, 1); - - // Second, check if CI is an array malloc whose array size can be determined. - if (isConstantOne(ElementSize)) - return MallocArg; - - if (ConstantInt *CInt = dyn_cast<ConstantInt>(MallocArg)) - if (CInt->getZExtValue() % ElementSizeInt == 0) - return ConstantInt::get(ArgType, CInt->getZExtValue() / ElementSizeInt); + ElementSize = TD->getStructLayout(ST)->getSizeInBytes(); - if (!CO && !BO) - return NULL; - - Value *Op0 = NULL; - Value *Op1 = NULL; - unsigned Opcode = 0; - if (CO && ((CO->getOpcode() == Instruction::Mul) || - (CO->getOpcode() == Instruction::Shl))) { - Op0 = CO->getOperand(0); - Op1 = CO->getOperand(1); - Opcode = CO->getOpcode(); - } - if (BO && ((BO->getOpcode() == Instruction::Mul) || - (BO->getOpcode() == Instruction::Shl))) { - Op0 = BO->getOperand(0); - Op1 = BO->getOperand(1); - Opcode = BO->getOpcode(); - } - - // Determine array size if malloc's argument is the product of a mul or shl. 
- if (Op0) { - if (Opcode == Instruction::Mul) { - if (Op1 == ElementSize) - // ArraySize * ElementSize - return Op0; - if (Op0 == ElementSize) - // ElementSize * ArraySize - return Op1; - } - if (Opcode == Instruction::Shl) { - ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); - if (!Op1CI) return NULL; - - APInt Op1Int = Op1CI->getValue(); - uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); - Value *Op1Pow = ConstantInt::get(Context, - APInt(Op1Int.getBitWidth(), 0).set(BitToSet)); - if (Op0 == ElementSize) - // ArraySize << log2(ElementSize) - return Op1Pow; - if (Op1Pow == ElementSize) - // ElementSize << log2(ArraySize) - return Op0; - } - } + // If malloc calls' arg can be determined to be a multiple of ElementSize, + // return the multiple. Otherwise, return NULL. + Value *MallocArg = CI->getOperand(1); + Value *Multiple = NULL; + if (ComputeMultiple(MallocArg, ElementSize, Multiple, + LookThroughSExt)) + return Multiple; - // We could not determine the malloc array size from MallocArg. return NULL; } /// isArrayMalloc - Returns the corresponding CallInst if the instruction /// is a call to malloc whose array size can be determined and the array size /// is not constant 1. Otherwise, return NULL. -CallInst *llvm::isArrayMalloc(Value *I, LLVMContext &Context, - const TargetData *TD) { - CallInst *CI = extractMallocCall(I); - Value *ArraySize = isArrayMallocHelper(CI, Context, TD); - - if (ArraySize && - ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1)) - return CI; - - // CI is a non-array malloc or we can't figure out that it is an array malloc. - return NULL; -} - -const CallInst *llvm::isArrayMalloc(const Value *I, LLVMContext &Context, - const TargetData *TD) { +const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) { const CallInst *CI = extractMallocCall(I); - Value *ArraySize = isArrayMallocHelper(CI, Context, TD); + Value *ArraySize = computeArraySize(CI, TD); if (ArraySize && ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1)) @@ -210,7 +133,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I, LLVMContext &Context, /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. const PointerType *llvm::getMallocType(const CallInst *CI) { - assert(isMalloc(CI) && "GetMallocType and not malloc call"); + assert(isMalloc(CI) && "getMallocType and not malloc call"); const PointerType *MallocType = NULL; unsigned NumOfBitCastUses = 0; @@ -250,9 +173,10 @@ const Type *llvm::getMallocAllocatedType(const CallInst *CI) { /// then return that multiple. For non-array mallocs, the multiple is /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// determined. -Value *llvm::getMallocArraySize(CallInst *CI, LLVMContext &Context, - const TargetData *TD) { - return isArrayMallocHelper(CI, Context, TD); +Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD, + bool LookThroughSExt) { + assert(isMalloc(CI) && "getMallocArraySize and not malloc call"); + return computeArraySize(CI, TD, LookThroughSExt); } //===----------------------------------------------------------------------===// diff --git a/lib/Analysis/PointerTracking.cpp b/lib/Analysis/PointerTracking.cpp index 2251b62..8da07e7 100644 --- a/lib/Analysis/PointerTracking.cpp +++ b/lib/Analysis/PointerTracking.cpp @@ -10,6 +10,7 @@ // This file implements tracking of pointer bounds. 
// //===----------------------------------------------------------------------===// + #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/LoopInfo.h" @@ -101,7 +102,7 @@ const SCEV *PointerTracking::computeAllocationCount(Value *P, } if (CallInst *CI = extractMallocCall(V)) { - Value *arraySize = getMallocArraySize(CI, P->getContext(), TD); + Value *arraySize = getMallocArraySize(CI, TD); const Type* AllocTy = getMallocAllocatedType(CI); if (!AllocTy || !arraySize) return SE->getCouldNotCompute(); Ty = AllocTy; diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 3e87ca2..ea4af40 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -3811,29 +3811,26 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node /// in the loop has the value PHIVal. If we can't fold this expression for some /// reason, return null. -static Constant *EvaluateExpression(Value *V, Constant *PHIVal) { +static Constant *EvaluateExpression(Value *V, Constant *PHIVal, + const TargetData *TD) { if (isa<PHINode>(V)) return PHIVal; if (Constant *C = dyn_cast<Constant>(V)) return C; if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV; Instruction *I = cast<Instruction>(V); - LLVMContext &Context = I->getParent()->getContext(); std::vector<Constant*> Operands; Operands.resize(I->getNumOperands()); for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { - Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal); + Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD); if (Operands[i] == 0) return 0; } if (const CmpInst *CI = dyn_cast<CmpInst>(I)) - return ConstantFoldCompareInstOperands(CI->getPredicate(), - &Operands[0], Operands.size(), - Context); - else - return ConstantFoldInstOperands(I->getOpcode(), I->getType(), - &Operands[0], Operands.size(), - Context); + return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], + Operands[1], TD); + return ConstantFoldInstOperands(I->getOpcode(), I->getType(), + &Operands[0], Operands.size(), TD); } /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is @@ -3879,7 +3876,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, return RetVal = PHIVal; // Got exit value! // Compute the value of the PHI node for the next iteration. - Constant *NextPHI = EvaluateExpression(BEValue, PHIVal); + Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); if (NextPHI == PHIVal) return RetVal = NextPHI; // Stopped evolving! if (NextPHI == 0) @@ -3920,7 +3917,7 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, for (Constant *PHIVal = StartCST; IterationNum != MaxIterations; ++IterationNum) { ConstantInt *CondVal = - dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal)); + dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD)); // Couldn't symbolically evaluate. if (!CondVal) return getCouldNotCompute(); @@ -3931,7 +3928,7 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, } // Compute the value of the PHI node for the next iteration. - Constant *NextPHI = EvaluateExpression(BEValue, PHIVal); + Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); if (NextPHI == 0 || NextPHI == PHIVal) return getCouldNotCompute();// Couldn't evaluate or not making progress... 
PHIVal = NextPHI; @@ -4040,12 +4037,10 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { Constant *C; if (const CmpInst *CI = dyn_cast<CmpInst>(I)) C = ConstantFoldCompareInstOperands(CI->getPredicate(), - &Operands[0], Operands.size(), - getContext()); + Operands[0], Operands[1], TD); else C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), - &Operands[0], Operands.size(), - getContext()); + &Operands[0], Operands.size(), TD); return getSCEV(C); } } diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index 5672510..b0e6900 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -789,6 +789,118 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD, return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros())); } +/// ComputeMultiple - This function computes the integer multiple of Base that +/// equals V. If successful, it returns true and returns the multiple in +/// Multiple. If unsuccessful, it returns false. It looks +/// through SExt instructions only if LookThroughSExt is true. +bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, + bool LookThroughSExt, unsigned Depth) { + const unsigned MaxDepth = 6; + + assert(V && "No Value?"); + assert(Depth <= MaxDepth && "Limit Search Depth"); + assert(V->getType()->isInteger() && "Not integer or pointer type!"); + + const Type *T = V->getType(); + + ConstantInt *CI = dyn_cast<ConstantInt>(V); + + if (Base == 0) + return false; + + if (Base == 1) { + Multiple = V; + return true; + } + + ConstantExpr *CO = dyn_cast<ConstantExpr>(V); + Constant *BaseVal = ConstantInt::get(T, Base); + if (CO && CO == BaseVal) { + // Multiple is 1. + Multiple = ConstantInt::get(T, 1); + return true; + } + + if (CI && CI->getZExtValue() % Base == 0) { + Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); + return true; + } + + if (Depth == MaxDepth) return false; // Limit search depth. 
+ + Operator *I = dyn_cast<Operator>(V); + if (!I) return false; + + switch (I->getOpcode()) { + default: break; + case Instruction::SExt: { + if (!LookThroughSExt) return false; + // otherwise fall through to ZExt + } + case Instruction::ZExt: { + return ComputeMultiple(I->getOperand(0), Base, Multiple, + LookThroughSExt, Depth+1); + } + case Instruction::Shl: + case Instruction::Mul: { + Value *Op0 = I->getOperand(0); + Value *Op1 = I->getOperand(1); + + if (I->getOpcode() == Instruction::Shl) { + ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); + if (!Op1CI) return false; + // Turn Op0 << Op1 into Op0 * 2^Op1 + APInt Op1Int = Op1CI->getValue(); + uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); + Op1 = ConstantInt::get(V->getContext(), + APInt(Op1Int.getBitWidth(), 0).set(BitToSet)); + } + + Value *Mul0 = NULL; + Value *Mul1 = NULL; + bool M0 = ComputeMultiple(Op0, Base, Mul0, + LookThroughSExt, Depth+1); + bool M1 = ComputeMultiple(Op1, Base, Mul1, + LookThroughSExt, Depth+1); + + if (M0) { + if (isa<Constant>(Op1) && isa<Constant>(Mul0)) { + // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) + Multiple = ConstantExpr::getMul(cast<Constant>(Mul0), + cast<Constant>(Op1)); + return true; + } + + if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) + if (Mul0CI->getValue() == 1) { + // V == Base * Op1, so return Op1 + Multiple = Op1; + return true; + } + } + + if (M1) { + if (isa<Constant>(Op0) && isa<Constant>(Mul1)) { + // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) + Multiple = ConstantExpr::getMul(cast<Constant>(Mul1), + cast<Constant>(Op0)); + return true; + } + + if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) + if (Mul1CI->getValue() == 1) { + // V == Base * Op0, so return Op0 + Multiple = Op0; + return true; + } + } + } + } + + // We could not determine if V is a multiple of Base. + return false; +} + /// CannotBeNegativeZero - Return true if we can prove that the specified FP /// value is never equal to -0.0. /// |
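For reference, a minimal sketch of how a client transformation might consume the LazyValueInfo interface added in this patch. The helper below is a hypothetical illustration, not part of the commit; only the getPredicateOnEdge signature, the Tristate result, and the include paths are taken from the declarations shown above.

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Hypothetical helper: returns true only when LVI can prove that V == C holds
// on the CFG edge FromBB -> ToBB.
static bool isKnownEqualOnEdge(LazyValueInfo &LVI, Value *V, Constant *C,
                               BasicBlock *FromBB, BasicBlock *ToBB) {
  // getPredicateOnEdge takes a CmpInst predicate and answers True, False, or
  // Unknown; anything other than True is treated as "not proven".
  return LVI.getPredicateOnEdge(ICmpInst::ICMP_EQ, V, C, FromBB, ToBB) ==
         LazyValueInfo::True;
}

A jump-threading-style client would typically obtain LVI with getAnalysis<LazyValueInfo>() and pair a query like this with getConstantOnEdge to simplify uses that are only reachable along that edge.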