Diffstat (limited to 'lib/CodeGen/CGExprScalar.cpp')
-rw-r--r--   lib/CodeGen/CGExprScalar.cpp   402
1 file changed, 224 insertions(+), 178 deletions(-)
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp index c1c252d..f3a5387 100644 --- a/lib/CodeGen/CGExprScalar.cpp +++ b/lib/CodeGen/CGExprScalar.cpp @@ -87,15 +87,16 @@ public: void EmitBinOpCheck(Value *Check, const BinOpInfo &Info); - Value *EmitLoadOfLValue(LValue LV) { - return CGF.EmitLoadOfLValue(LV).getScalarVal(); + Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) { + return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal(); } /// EmitLoadOfLValue - Given an expression with complex type that represents a /// value l-value, this method emits the address of the l-value, then loads /// and returns the result. Value *EmitLoadOfLValue(const Expr *E) { - return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load)); + return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load), + E->getExprLoc()); } /// EmitConversionToBool - Convert the specified expression value to a @@ -161,18 +162,18 @@ public: Value *Visit(Expr *E) { return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E); } - + Value *VisitStmt(Stmt *S) { S->dump(CGF.getContext().getSourceManager()); llvm_unreachable("Stmt can't have complex result type!"); } Value *VisitExpr(Expr *S); - + Value *VisitParenExpr(ParenExpr *PE) { - return Visit(PE->getSubExpr()); + return Visit(PE->getSubExpr()); } Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { - return Visit(E->getReplacement()); + return Visit(E->getReplacement()); } Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) { return Visit(GE->getResultExpr()); @@ -217,7 +218,7 @@ public: Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) { if (E->isGLValue()) - return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E)); + return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc()); // Otherwise, assume the mapping is the scalar directly. 
return CGF.getOpaqueRValueMapping(E).getScalarVal(); @@ -227,7 +228,8 @@ public: Value *VisitDeclRefExpr(DeclRefExpr *E) { if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) { if (result.isReference()) - return EmitLoadOfLValue(result.getReferenceLValue(CGF, E)); + return EmitLoadOfLValue(result.getReferenceLValue(CGF, E), + E->getExprLoc()); return result.getValue(); } return EmitLoadOfLValue(E); @@ -243,7 +245,7 @@ public: return EmitLoadOfLValue(E); } Value *VisitObjCMessageExpr(ObjCMessageExpr *E) { - if (E->getMethodDecl() && + if (E->getMethodDecl() && E->getMethodDecl()->getResultType()->isReferenceType()) return EmitLoadOfLValue(E); return CGF.EmitObjCMessageExpr(E).getScalarVal(); @@ -251,12 +253,13 @@ public: Value *VisitObjCIsaExpr(ObjCIsaExpr *E) { LValue LV = CGF.EmitObjCIsaExpr(E); - Value *V = CGF.EmitLoadOfLValue(LV).getScalarVal(); + Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); return V; } Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E); Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E); + Value *VisitConvertVectorExpr(ConvertVectorExpr *E); Value *VisitMemberExpr(MemberExpr *E); Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { @@ -310,7 +313,7 @@ public: llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); - + Value *VisitUnaryAddrOf(const UnaryOperator *E) { if (isa<MemberPointerType>(E->getType())) // never sugared return CGF.CGM.getMemberPointerConstant(E); @@ -335,12 +338,12 @@ public: Value *VisitUnaryExtension(const UnaryOperator *E) { return Visit(E->getSubExpr()); } - + // C++ Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) { return EmitLoadOfLValue(E); } - + Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { return Visit(DAE->getExpr()); } @@ -430,7 +433,7 @@ public: Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops); // Check for undefined division and modulus behaviors. - void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops, + void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops, llvm::Value *Zero,bool isDiv); // Common helper for getting how wide LHS of shift is. static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS); @@ -893,51 +896,43 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) { Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { // Vector Mask Case - if (E->getNumSubExprs() == 2 || + if (E->getNumSubExprs() == 2 || (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) { Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); Value *Mask; - + llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType()); unsigned LHSElts = LTy->getNumElements(); if (E->getNumSubExprs() == 3) { Mask = CGF.EmitScalarExpr(E->getExpr(2)); - + // Shuffle LHS & RHS into one input vector. SmallVector<llvm::Constant*, 32> concat; for (unsigned i = 0; i != LHSElts; ++i) { concat.push_back(Builder.getInt32(2*i)); concat.push_back(Builder.getInt32(2*i+1)); } - + Value* CV = llvm::ConstantVector::get(concat); LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat"); LHSElts *= 2; } else { Mask = RHS; } - + llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType()); llvm::Constant* EltMask; - - // Treat vec3 like vec4. 
- if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) - EltMask = llvm::ConstantInt::get(MTy->getElementType(), - (1 << llvm::Log2_32(LHSElts+2))-1); - else if ((LHSElts == 3) && (E->getNumSubExprs() == 2)) - EltMask = llvm::ConstantInt::get(MTy->getElementType(), - (1 << llvm::Log2_32(LHSElts+1))-1); - else - EltMask = llvm::ConstantInt::get(MTy->getElementType(), - (1 << llvm::Log2_32(LHSElts))-1); - + + EltMask = llvm::ConstantInt::get(MTy->getElementType(), + llvm::NextPowerOf2(LHSElts-1)-1); + // Mask off the high bits of each shuffle index. Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(), EltMask); Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); - + // newv = undef // mask = mask & maskbits // for each elt @@ -945,43 +940,110 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { // x = extract val n // newv = insert newv, x, i llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(), - MTy->getNumElements()); + MTy->getNumElements()); Value* NewV = llvm::UndefValue::get(RTy); for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { Value *IIndx = Builder.getInt32(i); Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext"); - - // Handle vec3 special since the index will be off by one for the RHS. - if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) { - Value *cmpIndx, *newIndx; - cmpIndx = Builder.CreateICmpUGT(Indx, Builder.getInt32(3), - "cmp_shuf_idx"); - newIndx = Builder.CreateSub(Indx, Builder.getInt32(1), "shuf_idx_adj"); - Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx"); - } + Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); } return NewV; } - + Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); - - // Handle vec3 special since the index will be off by one for the RHS. - llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType()); + SmallVector<llvm::Constant*, 32> indices; - for (unsigned i = 2; i < E->getNumSubExprs(); i++) { - unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); - if (VTy->getNumElements() == 3 && Idx > 3) - Idx -= 1; - indices.push_back(Builder.getInt32(Idx)); + for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { + llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); + // Check for -1 and output it as undef in the IR. + if (Idx.isSigned() && Idx.isAllOnesValue()) + indices.push_back(llvm::UndefValue::get(CGF.Int32Ty)); + else + indices.push_back(Builder.getInt32(Idx.getZExtValue())); } Value *SV = llvm::ConstantVector::get(indices); return Builder.CreateShuffleVector(V1, V2, SV, "shuffle"); } + +Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { + QualType SrcType = E->getSrcExpr()->getType(), + DstType = E->getType(); + + Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); + + SrcType = CGF.getContext().getCanonicalType(SrcType); + DstType = CGF.getContext().getCanonicalType(DstType); + if (SrcType == DstType) return Src; + + assert(SrcType->isVectorType() && + "ConvertVector source type must be a vector"); + assert(DstType->isVectorType() && + "ConvertVector destination type must be a vector"); + + llvm::Type *SrcTy = Src->getType(); + llvm::Type *DstTy = ConvertType(DstType); + + // Ignore conversions like int -> uint. 
+ if (SrcTy == DstTy) + return Src; + + QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(), + DstEltType = DstType->getAs<VectorType>()->getElementType(); + + assert(SrcTy->isVectorTy() && + "ConvertVector source IR type must be a vector"); + assert(DstTy->isVectorTy() && + "ConvertVector destination IR type must be a vector"); + + llvm::Type *SrcEltTy = SrcTy->getVectorElementType(), + *DstEltTy = DstTy->getVectorElementType(); + + if (DstEltType->isBooleanType()) { + assert((SrcEltTy->isFloatingPointTy() || + isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); + + llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); + if (SrcEltTy->isFloatingPointTy()) { + return Builder.CreateFCmpUNE(Src, Zero, "tobool"); + } else { + return Builder.CreateICmpNE(Src, Zero, "tobool"); + } + } + + // We have the arithmetic types: real int/float. + Value *Res = NULL; + + if (isa<llvm::IntegerType>(SrcEltTy)) { + bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType(); + if (isa<llvm::IntegerType>(DstEltTy)) + Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv"); + else if (InputSigned) + Res = Builder.CreateSIToFP(Src, DstTy, "conv"); + else + Res = Builder.CreateUIToFP(Src, DstTy, "conv"); + } else if (isa<llvm::IntegerType>(DstEltTy)) { + assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion"); + if (DstEltType->isSignedIntegerOrEnumerationType()) + Res = Builder.CreateFPToSI(Src, DstTy, "conv"); + else + Res = Builder.CreateFPToUI(Src, DstTy, "conv"); + } else { + assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() && + "Unknown real conversion"); + if (DstEltTy->getTypeID() < SrcEltTy->getTypeID()) + Res = Builder.CreateFPTrunc(Src, DstTy, "conv"); + else + Res = Builder.CreateFPExt(Src, DstTy, "conv"); + } + + return Res; +} + Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { llvm::APSInt Value; if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) { @@ -992,18 +1054,6 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { return Builder.getInt(Value); } - // Emit debug info for aggregate now, if it was delayed to reduce - // debug info size. 
- CGDebugInfo *DI = CGF.getDebugInfo(); - if (DI && - CGF.CGM.getCodeGenOpts().getDebugInfo() - == CodeGenOptions::LimitedDebugInfo) { - QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType(); - if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) - if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl())) - DI->getOrCreateRecordType(PTy->getPointeeType(), - M->getParent()->getLocation()); - } return EmitLoadOfLValue(E); } @@ -1023,7 +1073,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { Value *Idx = Visit(E->getIdx()); QualType IdxTy = E->getIdx()->getType(); - if (CGF.SanOpts->Bounds) + if (CGF.SanOpts->ArrayBounds) CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true); bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); @@ -1034,7 +1084,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off, llvm::Type *I32Ty) { int MV = SVI->getMaskValue(Idx); - if (MV == -1) + if (MV == -1) return llvm::UndefValue::get(I32Ty); return llvm::ConstantInt::get(I32Ty, Off+MV); } @@ -1044,13 +1094,13 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { (void)Ignore; assert (Ignore == false && "init list ignored"); unsigned NumInitElements = E->getNumInits(); - + if (E->hadArrayRangeDesignator()) CGF.ErrorUnsupported(E, "GNU array range designator extension"); - + llvm::VectorType *VType = dyn_cast<llvm::VectorType>(ConvertType(E->getType())); - + if (!VType) { if (NumInitElements == 0) { // C++11 value-initialization for the scalar. @@ -1059,10 +1109,10 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { // We have a scalar in braces. Just use the first element. return Visit(E->getInit(0)); } - + unsigned ResElts = VType->getNumElements(); - - // Loop over initializers collecting the Value for each, and remembering + + // Loop over initializers collecting the Value for each, and remembering // whether the source was swizzle (ExtVectorElementExpr). This will allow // us to fold the shuffle for the swizzle into the shuffle for the vector // initializer, since LLVM optimizers generally do not want to touch @@ -1074,11 +1124,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { Expr *IE = E->getInit(i); Value *Init = Visit(IE); SmallVector<llvm::Constant*, 16> Args; - + llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); - + // Handle scalar elements. If the scalar initializer is actually one - // element of a different vector of the same width, use shuffle instead of + // element of a different vector of the same width, use shuffle instead of // extract+insert. if (!VVT) { if (isa<ExtVectorElementExpr>(IE)) { @@ -1121,10 +1171,10 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { ++CurIdx; continue; } - + unsigned InitElts = VVT->getNumElements(); - // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's + // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's // input is the same width as the vector being constructed, generate an // optimized shuffle of the swizzle input into the result. unsigned Offset = (CurIdx == 0) ? 
0 : ResElts; @@ -1132,7 +1182,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); Value *SVOp = SVI->getOperand(0); llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType()); - + if (OpTy->getNumElements() == ResElts) { for (unsigned j = 0; j != CurIdx; ++j) { // If the current vector initializer is a shuffle with undef, merge @@ -1182,11 +1232,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { VIsUndefShuffle = isa<llvm::UndefValue>(Init); CurIdx += InitElts; } - + // FIXME: evaluate codegen vs. shuffling against constant null vector. // Emit remaining default initializers. llvm::Type *EltTy = VType->getElementType(); - + // Emit remaining default initializers for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) { Value *Idx = Builder.getInt32(CurIdx); @@ -1201,12 +1251,12 @@ static bool ShouldNullCheckClassCastValue(const CastExpr *CE) { if (CE->getCastKind() == CK_UncheckedDerivedToBase) return false; - + if (isa<CXXThisExpr>(E)) { // We always assume that 'this' is never null. return false; } - + if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) { // And that glvalue casts are never null. if (ICE->getValueKind() != VK_RValue) @@ -1223,7 +1273,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { Expr *E = CE->getSubExpr(); QualType DestTy = CE->getType(); CastKind Kind = CE->getCastKind(); - + if (!DestTy->isVoidType()) TestAndClearIgnoreResultAssign(); @@ -1235,12 +1285,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_BuiltinFnToFnPtr: llvm_unreachable("builtin functions are handled elsewhere"); - case CK_LValueBitCast: + case CK_LValueBitCast: case CK_ObjCObjectLValueCast: { Value *V = EmitLValue(E).getAddress(); - V = Builder.CreateBitCast(V, + V = Builder.CreateBitCast(V, ConvertType(CGF.getContext().getPointerType(DestTy))); - return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy)); + return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy), + CE->getExprLoc()); } case CK_CPointerToObjCPointerCast: @@ -1262,15 +1313,18 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm::Value *V = Visit(E); + llvm::Value *Derived = + CGF.GetAddressOfDerivedClass(V, DerivedClassDecl, + CE->path_begin(), CE->path_end(), + ShouldNullCheckClassCastValue(CE)); + // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is // performed and the object is not of the derived type. 
if (CGF.SanitizePerformTypeCheck) CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), - V, DestTy->getPointeeType()); + Derived, DestTy->getPointeeType()); - return CGF.GetAddressOfDerivedClass(V, DerivedClassDecl, - CE->path_begin(), CE->path_end(), - ShouldNullCheckClassCastValue(CE)); + return Derived; } case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { @@ -1278,7 +1332,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { E->getType()->getPointeeCXXRecordDecl(); assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!"); - return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl, + return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(), ShouldNullCheckClassCastValue(CE)); } @@ -1330,7 +1384,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_BaseToDerivedMemberPointer: case CK_DerivedToBaseMemberPointer: { Value *Src = Visit(E); - + // Note that the AST doesn't distinguish between checked and // unchecked member pointer conversions, so we always have to // implement checked conversions here. This is inefficient when @@ -1354,7 +1408,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_CopyAndAutoreleaseBlockObject: return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType()); - + case CK_FloatingRealToComplex: case CK_FloatingComplexCast: case CK_IntegralRealToComplex: @@ -1442,8 +1496,12 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { CodeGenFunction::StmtExprEvaluation eval(CGF); - return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType()) - .getScalarVal(); + llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), + !E->getType()->isVoidType()); + if (!RetAlloca) + return 0; + return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()), + E->getExprLoc()); } //===----------------------------------------------------------------------===// @@ -1477,7 +1535,7 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E, llvm::Value * ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre) { - + QualType type = E->getSubExpr()->getType(); llvm::PHINode *atomicPHI = 0; llvm::Value *value; @@ -1503,7 +1561,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, } // Special case for atomic increment / decrement on integers, emit // atomicrmw instructions. We skip this if we want to be doing overflow - // checking, and fall into the slow path with the atomic cmpxchg loop. + // checking, and fall into the slow path with the atomic cmpxchg loop. if (!type->isBooleanType() && type->isIntegerType() && !(type->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow) && @@ -1519,7 +1577,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, LV.getAddress(), amt, llvm::SequentiallyConsistent); return isPre ? 
Builder.CreateBinOp(op, old, amt) : old; } - value = EmitLoadOfLValue(LV); + value = EmitLoadOfLValue(LV, E->getExprLoc()); input = value; // For every other atomic operation, we need to emit a load-op-cmpxchg loop llvm::BasicBlock *startBB = Builder.GetInsertBlock(); @@ -1531,7 +1589,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, atomicPHI->addIncoming(value, startBB); value = atomicPHI; } else { - value = EmitLoadOfLValue(LV); + value = EmitLoadOfLValue(LV, E->getExprLoc()); input = value; } @@ -1569,7 +1627,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, value = EmitOverflowCheckedBinOp(BinOp); } else value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); - + // Next most common: pointer increment. } else if (const PointerType *ptr = type->getAs<PointerType>()) { QualType type = ptr->getPointeeType(); @@ -1583,7 +1641,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, value = Builder.CreateGEP(value, numElts, "vla.inc"); else value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc"); - + // Arithmetic on function pointers (!) is just +-1. } else if (type->isFunctionType()) { llvm::Value *amt = Builder.getInt32(amount); @@ -1665,7 +1723,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr"); value = Builder.CreateBitCast(value, input->getType()); } - + if (atomicPHI) { llvm::BasicBlock *opBB = Builder.GetInsertBlock(); llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); @@ -1696,10 +1754,10 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { // Emit unary minus with EmitSub so we handle overflow cases etc. BinOpInfo BinOp; BinOp.RHS = Visit(E->getSubExpr()); - + if (BinOp.RHS->getType()->isFPOrFPVectorTy()) BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType()); - else + else BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); BinOp.Ty = E->getType(); BinOp.Opcode = BO_Sub; @@ -1726,7 +1784,7 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); } - + // Compare operand to zero. Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); @@ -1814,7 +1872,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { // Save the element type. CurrentType = ON.getBase()->getType(); - + // Compute the offset to the base. const RecordType *BaseRT = CurrentType->getAs<RecordType>(); CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); @@ -1873,7 +1931,8 @@ Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. if (E->isGLValue()) - return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal(); + return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), + E->getExprLoc()).getScalarVal(); // Otherwise, calculate and project. return CGF.EmitComplexExpr(Op, false, true).first; @@ -1889,7 +1948,8 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. 
if (Op->isGLValue()) - return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal(); + return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), + E->getExprLoc()).getScalarVal(); // Otherwise, calculate and project. return CGF.EmitComplexExpr(Op, true, false).second; @@ -1926,17 +1986,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( Value *&Result) { QualType LHSTy = E->getLHS()->getType(); BinOpInfo OpInfo; - - if (E->getComputationResultType()->isAnyComplexType()) { - // This needs to go through the complex expression emitter, but it's a tad - // complicated to do that... I'm leaving it out for now. (Note that we do - // actually need the imaginary part of the RHS for multiplication and - // division.) - CGF.ErrorUnsupported(E, "complex compound assignment"); - Result = llvm::UndefValue::get(CGF.ConvertType(E->getType())); - return LValue(); - } - + + if (E->getComputationResultType()->isAnyComplexType()) + return CGF.EmitScalarCompooundAssignWithComplex(E, Result); + // Emit the RHS first. __block variables need to have the rhs evaluated // first, plus this should improve codegen a little. OpInfo.RHS = Visit(E->getRHS()); @@ -1993,7 +2046,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( // floating point environment in the loop. llvm::BasicBlock *startBB = Builder.GetInsertBlock(); llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); - OpInfo.LHS = EmitLoadOfLValue(LHSLV); + OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); Builder.CreateBr(opBB); Builder.SetInsertPoint(opBB); @@ -2002,14 +2055,14 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( OpInfo.LHS = atomicPHI; } else - OpInfo.LHS = EmitLoadOfLValue(LHSLV); + OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType()); // Expand the binary operator. Result = (this->*Func)(OpInfo); - + // Convert the result back to the LHS type. Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy); @@ -2024,7 +2077,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( Builder.SetInsertPoint(contBB); return LHSLV; } - + // Store the result value into the LHS lvalue. Bit-fields are handled // specially because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the @@ -2056,7 +2109,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, return RHS; // Otherwise, reload the value. - return EmitLoadOfLValue(LHS); + return EmitLoadOfLValue(LHS, E->getExprLoc()); } void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( @@ -2236,7 +2289,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF, // Must have binary (not unary) expr here. Unary pointer // increment/decrement doesn't use this path. 
const BinaryOperator *expr = cast<BinaryOperator>(op.E); - + Value *pointer = op.LHS; Expr *pointerOperand = expr->getLHS(); Value *index = op.RHS; @@ -2261,7 +2314,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF, if (isSubtraction) index = CGF.Builder.CreateNeg(index, "idx.neg"); - if (CGF.SanOpts->Bounds) + if (CGF.SanOpts->ArrayBounds) CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), /*Accessed*/ false); @@ -2325,7 +2378,7 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd) { assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set."); - + Value *MulOp0 = MulOp->getOperand(0); Value *MulOp1 = MulOp->getOperand(1); if (negMul) { @@ -2355,7 +2408,7 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend, // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. // Does NOT check the type of the operation - it's assumed that this function // will be called from contexts where it's known that the type is contractable. -static Value* tryEmitFMulAdd(const BinOpInfo &op, +static Value* tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false) { @@ -2503,7 +2556,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { divisor = CGF.CGM.getSize(elementSize); } - + // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since // pointer difference in C is only defined in the case where both operands // are pointing to elements of an array. @@ -2809,7 +2862,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { return RHS; // Otherwise, reload the value. - return EmitLoadOfLValue(LHS); + return EmitLoadOfLValue(LHS, E->getExprLoc()); } Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { @@ -2828,9 +2881,9 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { Value *And = Builder.CreateAnd(LHS, RHS); return Builder.CreateSExt(And, ConvertType(E->getType()), "sext"); } - + llvm::Type *ResTy = ConvertType(E->getType()); - + // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. // If we have 1 && X, just emit X without inserting the control flow. bool LHSCondVal; @@ -2899,9 +2952,9 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { Value *Or = Builder.CreateOr(LHS, RHS); return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext"); } - + llvm::Type *ResTy = ConvertType(E->getType()); - + // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. // If we have 0 || X, just emit X without inserting the control flow. bool LHSCondVal; @@ -2970,22 +3023,15 @@ Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) { /// flow into selects in some cases. static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, CodeGenFunction &CGF) { - E = E->IgnoreParens(); - // Anything that is an integer or floating point constant is fine. - if (E->isConstantInitializer(CGF.getContext(), false)) - return true; - - // Non-volatile automatic variables too, to get "cond ? X : Y" where - // X and Y are local variables. 
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) - if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) - if (VD->hasLocalStorage() && !(CGF.getContext() - .getCanonicalType(VD->getType()) - .isVolatileQualified())) - return true; - - return false; + return E->IgnoreParens()->isEvaluatable(CGF.getContext()); + + // Even non-volatile automatic variables can't be evaluated unconditionally. + // Referencing a thread_local may cause non-trivial initialization work to + // occur. If we're inside a lambda and one of the variables is from the scope + // outside the lambda, that function may have returned already. Reading its + // locals is a bad idea. Also, these reads may introduce races there didn't + // exist in the source-level program. } @@ -3023,26 +3069,26 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // OpenCL: If the condition is a vector, we can treat this condition like // the select function. - if (CGF.getLangOpts().OpenCL + if (CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) { llvm::Value *CondV = CGF.EmitScalarExpr(condExpr); llvm::Value *LHS = Visit(lhsExpr); llvm::Value *RHS = Visit(rhsExpr); - + llvm::Type *condType = ConvertType(condExpr->getType()); llvm::VectorType *vecTy = cast<llvm::VectorType>(condType); - - unsigned numElem = vecTy->getNumElements(); + + unsigned numElem = vecTy->getNumElements(); llvm::Type *elemType = vecTy->getElementType(); - + llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy); llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec); - llvm::Value *tmp = Builder.CreateSExt(TestMSB, + llvm::Value *tmp = Builder.CreateSExt(TestMSB, llvm::VectorType::get(elemType, - numElem), + numElem), "sext"); llvm::Value *tmp2 = Builder.CreateNot(tmp); - + // Cast float to int to perform ANDs if necessary. llvm::Value *RHSTmp = RHS; llvm::Value *LHSTmp = LHS; @@ -3053,7 +3099,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { LHSTmp = Builder.CreateBitCast(LHS, tmp->getType()); wasCast = true; } - + llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2); llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp); llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond"); @@ -3062,7 +3108,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { return tmp5; } - + // If this is a really simple expression (like x ? 4 : 5), emit this as a // select instead of as control flow. We can only do this if it is cheap and // safe to evaluate the LHS and RHS unconditionally. @@ -3116,7 +3162,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { } Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { - return Visit(E->getChosenSubExpr(CGF.getContext())); + return Visit(E->getChosenSubExpr()); } Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { @@ -3138,49 +3184,49 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) { Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) { Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); llvm::Type *DstTy = ConvertType(E->getType()); - + // Going from vec4->vec3 or vec3->vec4 is a special case and requires // a shuffle vector instead of a bitcast. 
llvm::Type *SrcTy = Src->getType(); if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) { unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements(); unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements(); - if ((numElementsDst == 3 && numElementsSrc == 4) + if ((numElementsDst == 3 && numElementsSrc == 4) || (numElementsDst == 4 && numElementsSrc == 3)) { - - + + // In the case of going from int4->float3, a bitcast is needed before // doing a shuffle. - llvm::Type *srcElemTy = + llvm::Type *srcElemTy = cast<llvm::VectorType>(SrcTy)->getElementType(); - llvm::Type *dstElemTy = + llvm::Type *dstElemTy = cast<llvm::VectorType>(DstTy)->getElementType(); - + if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy()) || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) { // Create a float type of the same size as the source or destination. llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy, numElementsSrc); - + Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast"); } - + llvm::Value *UnV = llvm::UndefValue::get(Src->getType()); - + SmallVector<llvm::Constant*, 3> Args; Args.push_back(Builder.getInt32(0)); Args.push_back(Builder.getInt32(1)); Args.push_back(Builder.getInt32(2)); - + if (numElementsDst == 4) Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); - + llvm::Constant *Mask = llvm::ConstantVector::get(Args); - + return Builder.CreateShuffleVector(Src, UnV, Mask, "astype"); } } - + return Builder.CreateBitCast(Src, DstTy, "astype"); } @@ -3248,14 +3294,14 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { llvm::Value *Src = EmitScalarExpr(BaseExpr); Builder.CreateStore(Src, V); V = ScalarExprEmitter(*this).EmitLoadOfLValue( - MakeNaturalAlignAddrLValue(V, E->getType())); + MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc()); } else { if (E->isArrow()) V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr); else V = EmitLValue(BaseExpr).getAddress(); } - + // build Class* type ClassPtrTy = ClassPtrTy->getPointerTo(); V = Builder.CreateBitCast(V, ClassPtrTy); @@ -3283,7 +3329,7 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue( COMPOUND_OP(Xor); COMPOUND_OP(Or); #undef COMPOUND_OP - + case BO_PtrMemD: case BO_PtrMemI: case BO_Mul: @@ -3308,6 +3354,6 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue( case BO_Comma: llvm_unreachable("Not valid compound assignment operators"); } - + llvm_unreachable("Unhandled compound assignment operator"); } |
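
Illustrative sketch only, not part of the diff: the VisitConvertVectorExpr hunk above adds scalar codegen for Clang's __builtin_convertvector. A minimal C example of the kind of source it lowers, assuming the ext_vector_type extension (the typedefs and function name here are hypothetical), might look like:

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef int   int4   __attribute__((ext_vector_type(4)));

    int4 float4_to_int4(float4 v) {
      /* For a floating-point source and signed integer destination, the new
         visitor emits an fptosi <4 x float> -> <4 x i32> via
         Builder.CreateFPToSI (the "conv" names in the hunk above). */
      return __builtin_convertvector(v, int4);
    }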