author    | dim <dim@FreeBSD.org> | 2013-04-08 18:45:10 +0000
committer | dim <dim@FreeBSD.org> | 2013-04-08 18:45:10 +0000
commit    | c72c57c9e9b69944e3e009cd5e209634839581d3 (patch)
tree      | 4fc2f184c499d106f29a386c452b49e5197bf63d /lib/CodeGen/CGExprScalar.cpp
parent    | 5b20025c30d23d521e12c1f33ec8fa6b821952cd (diff)
download  | FreeBSD-src-c72c57c9e9b69944e3e009cd5e209634839581d3.zip FreeBSD-src-c72c57c9e9b69944e3e009cd5e209634839581d3.tar.gz
Vendor import of clang trunk r178860:
http://llvm.org/svn/llvm-project/cfe/trunk@178860
Diffstat (limited to 'lib/CodeGen/CGExprScalar.cpp')
-rw-r--r-- | lib/CodeGen/CGExprScalar.cpp | 478
1 file changed, 331 insertions, 147 deletions
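
Most of the churn in the diff below is mechanical: the LLVM IR headers move under llvm/IR/, the sanitizer queries change from CGF.getLangOpts().SanitizeFoo to the finer-grained CGF.SanOpts->Foo fields, and several hand-rolled IR sequences are replaced by newer IRBuilder helpers. The functional changes are overflow-checked lowering for unsigned integer arithmetic (via the uadd/usub/umul with.overflow intrinsics alongside the existing signed ones), bounds checks on array subscripts and pointer arithmetic, atomicrmw fast paths for _Atomic integer increment/decrement and compound assignment, recoverable sanitizer handlers, and explicit branching in the shift checks. The snippet below is an illustrative sketch, not code from this tree; the -fsanitize spellings in the comments are assumed to be the clang 3.3-era driver flags behind the corresponding SanOpts fields.

    // Illustrative translation unit (assumed flag mapping:
    //   -fsanitize=unsigned-integer-overflow  -> SanOpts->UnsignedIntegerOverflow
    //   -fsanitize=bounds                     -> SanOpts->Bounds).
    unsigned mul_u(unsigned a, unsigned b) {
      // With -fsanitize=unsigned-integer-overflow, EmitMul/EmitAdd/EmitSub now go
      // through EmitOverflowCheckedBinOp, which selects llvm.umul.with.overflow
      // here instead of the previously hard-coded signed intrinsic.
      return a * b;
    }

    int elem(const int *p, int i, int n) {
      // With -fsanitize=bounds, VisitArraySubscriptExpr and emitPointerArithmetic
      // call CGF.EmitBoundsCheck on the index before the element is accessed.
      return i < n ? p[i] : 0;
    }
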
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp index b429b1d..ffd0eb5 100644 --- a/lib/CodeGen/CGExprScalar.cpp +++ b/lib/CodeGen/CGExprScalar.cpp @@ -11,24 +11,24 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/CodeGenOptions.h" #include "CodeGenFunction.h" #include "CGCXXABI.h" +#include "CGDebugInfo.h" #include "CGObjCRuntime.h" #include "CodeGenModule.h" -#include "CGDebugInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/TargetInfo.h" -#include "llvm/Constants.h" -#include "llvm/Function.h" -#include "llvm/GlobalVariable.h" -#include "llvm/Intrinsics.h" -#include "llvm/Module.h" +#include "clang/Frontend/CodeGenOptions.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Module.h" #include "llvm/Support/CFG.h" -#include "llvm/DataLayout.h" #include <cstdarg> using namespace clang; @@ -266,7 +266,7 @@ public: Value *VisitInitListExpr(InitListExpr *E); Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { - return CGF.CGM.EmitNullConstant(E->getType()); + return EmitNullValue(E->getType()); } Value *VisitExplicitCastExpr(ExplicitCastExpr *E) { if (E->getType()->isVariablyModifiedType()) @@ -406,7 +406,7 @@ public: case LangOptions::SOB_Defined: return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); case LangOptions::SOB_Undefined: - if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow) + if (!CGF.SanOpts->SignedIntegerOverflow) return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul"); // Fall through. case LangOptions::SOB_Trapping: @@ -414,6 +414,9 @@ public: } } + if (Ops.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow) + return EmitOverflowCheckedBinOp(Ops); + if (Ops.LHS->getType()->isFPOrFPVectorTy()) return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul"); return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); @@ -425,6 +428,8 @@ public: // Check for undefined division and modulus behaviors. void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops, llvm::Value *Zero,bool isDiv); + // Common helper for getting how wide LHS of shift is. + static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS); Value *EmitDiv(const BinOpInfo &Ops); Value *EmitRem(const BinOpInfo &Ops); Value *EmitAdd(const BinOpInfo &Ops); @@ -578,62 +583,93 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc, Check = Builder.CreateAnd(GE, LE); } } else { - // Floating-point to integer or floating-point to floating-point. This has - // undefined behavior if the source is +-Inf, NaN, or doesn't fit into the - // destination type. const llvm::fltSemantics &SrcSema = CGF.getContext().getFloatTypeSemantics(OrigSrcType); - APFloat MaxSrc(SrcSema, APFloat::uninitialized); - APFloat MinSrc(SrcSema, APFloat::uninitialized); - if (isa<llvm::IntegerType>(DstTy)) { + // Floating-point to integer. This has undefined behavior if the source is + // +-Inf, NaN, or doesn't fit into the destination type (after truncation + // to an integer). 
unsigned Width = CGF.getContext().getIntWidth(DstType); bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType(); APSInt Min = APSInt::getMinValue(Width, Unsigned); + APFloat MinSrc(SrcSema, APFloat::uninitialized); if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) & APFloat::opOverflow) // Don't need an overflow check for lower bound. Just check for // -Inf/NaN. - MinSrc = APFloat::getLargest(SrcSema, true); + MinSrc = APFloat::getInf(SrcSema, true); + else + // Find the largest value which is too small to represent (before + // truncation toward zero). + MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative); APSInt Max = APSInt::getMaxValue(Width, Unsigned); + APFloat MaxSrc(SrcSema, APFloat::uninitialized); if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) & APFloat::opOverflow) // Don't need an overflow check for upper bound. Just check for // +Inf/NaN. - MaxSrc = APFloat::getLargest(SrcSema, false); + MaxSrc = APFloat::getInf(SrcSema, false); + else + // Find the smallest value which is too large to represent (before + // truncation toward zero). + MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive); + + // If we're converting from __half, convert the range to float to match + // the type of src. + if (OrigSrcType->isHalfType()) { + const llvm::fltSemantics &Sema = + CGF.getContext().getFloatTypeSemantics(SrcType); + bool IsInexact; + MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact); + MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact); + } + + llvm::Value *GE = + Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc)); + llvm::Value *LE = + Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc)); + Check = Builder.CreateAnd(GE, LE); } else { + // FIXME: Maybe split this sanitizer out from float-cast-overflow. + // + // Floating-point to floating-point. This has undefined behavior if the + // source is not in the range of representable values of the destination + // type. The C and C++ standards are spectacularly unclear here. We + // diagnose finite out-of-range conversions, but allow infinities and NaNs + // to convert to the corresponding value in the smaller type. + // + // C11 Annex F gives all such conversions defined behavior for IEC 60559 + // conforming implementations. Unfortunately, LLVM's fptrunc instruction + // does not. + + // Converting from a lower rank to a higher rank can never have + // undefined behavior, since higher-rank types must have a superset + // of values of lower-rank types. + if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1) + return; + + assert(!OrigSrcType->isHalfType() && + "should not check conversion from __half, it has the lowest rank"); + const llvm::fltSemantics &DstSema = CGF.getContext().getFloatTypeSemantics(DstType); - bool IsInexact; - - MinSrc = APFloat::getLargest(DstSema, true); - if (MinSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) & - APFloat::opOverflow) - MinSrc = APFloat::getLargest(SrcSema, true); + APFloat MinBad = APFloat::getLargest(DstSema, false); + APFloat MaxBad = APFloat::getInf(DstSema, false); - MaxSrc = APFloat::getLargest(DstSema, false); - if (MaxSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) & - APFloat::opOverflow) - MaxSrc = APFloat::getLargest(SrcSema, false); - } - - // If we're converting from __half, convert the range to float to match - // the type of src. 
- if (OrigSrcType->isHalfType()) { - const llvm::fltSemantics &Sema = - CGF.getContext().getFloatTypeSemantics(SrcType); bool IsInexact; - MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact); - MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact); + MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact); + MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact); + + Value *AbsSrc = CGF.EmitNounwindRuntimeCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src); + llvm::Value *GE = + Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad)); + llvm::Value *LE = + Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad)); + Check = Builder.CreateNot(Builder.CreateAnd(GE, LE)); } - - llvm::Value *GE = - Builder.CreateFCmpOGE(Src, llvm::ConstantFP::get(VMContext, MinSrc)); - llvm::Value *LE = - Builder.CreateFCmpOLE(Src, llvm::ConstantFP::get(VMContext, MaxSrc)); - Check = Builder.CreateAnd(GE, LE); } // FIXME: Provide a SourceLocation. @@ -641,7 +677,8 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc, CGF.EmitCheckTypeDescriptor(OrigSrcType), CGF.EmitCheckTypeDescriptor(DstType) }; - CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc); + CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc, + CodeGenFunction::CRK_Recoverable); } /// EmitScalarConversion - Emit a conversion from the specified type to the @@ -658,9 +695,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, QualType OrigSrcType = SrcType; llvm::Type *SrcTy = Src->getType(); - // Floating casts might be a bit special: if we're doing casts to / from half - // FP, we should go via special intrinsics. - if (SrcType->isHalfType()) { + // If casting to/from storage-only half FP, use special intrinsics. + if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { Src = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), Src); SrcType = CGF.getContext().FloatTy; SrcTy = CGF.FloatTy; @@ -707,17 +743,9 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType(); llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy); - // Insert the element in element zero of an undef vector - llvm::Value *UnV = llvm::UndefValue::get(DstTy); - llvm::Value *Idx = Builder.getInt32(0); - UnV = Builder.CreateInsertElement(UnV, Elt, Idx); - // Splat the element across to all elements unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements(); - llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements, - Builder.getInt32(0)); - llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); - return Yay; + return Builder.CreateVectorSplat(NumElements, Elt, "splat"); } // Allow bitcast from vector to integer/fp of the same size. @@ -731,12 +759,13 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, // An overflowing conversion has undefined behavior if either the source type // or the destination type is a floating-point type. 
- if (CGF.getLangOpts().SanitizeFloatCastOverflow && + if (CGF.SanOpts->FloatCastOverflow && (OrigSrcType->isFloatingType() || DstType->isFloatingType())) - EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy); + EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, + DstTy); // Cast to half via float - if (DstType->isHalfType()) + if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) DstTy = CGF.FloatTy; if (isa<llvm::IntegerType>(SrcTy)) { @@ -777,7 +806,7 @@ Value *ScalarExprEmitter:: EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy) { // Get the source element type. - SrcTy = SrcTy->getAs<ComplexType>()->getElementType(); + SrcTy = SrcTy->castAs<ComplexType>()->getElementType(); // Handle conversions to bool first, they are special: comparisons against 0. if (DstTy->isBooleanType()) { @@ -795,10 +824,7 @@ EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, } Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { - if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) - return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); - - return llvm::Constant::getNullValue(ConvertType(Ty)); + return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty); } /// \brief Emit a sanitization check for the given "binary" operation (which @@ -806,8 +832,8 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { /// operation). The check passes if \p Check, which is an \c i1, is \c true. void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) { StringRef CheckName; - llvm::SmallVector<llvm::Constant *, 4> StaticData; - llvm::SmallVector<llvm::Value *, 2> DynamicData; + SmallVector<llvm::Constant *, 4> StaticData; + SmallVector<llvm::Value *, 2> DynamicData; BinaryOperatorKind Opcode = Info.Opcode; if (BinaryOperator::isCompoundAssignmentOp(Opcode)) @@ -831,7 +857,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) { } else if (Opcode == BO_Div || Opcode == BO_Rem) { // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1). CheckName = "divrem_overflow"; - StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType())); + StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); } else { // Signed arithmetic overflow (+, -, *). switch (Opcode) { @@ -840,13 +866,14 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) { case BO_Mul: CheckName = "mul_overflow"; break; default: llvm_unreachable("unexpected opcode for bin op check"); } - StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType())); + StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); } DynamicData.push_back(Info.LHS); DynamicData.push_back(Info.RHS); } - CGF.EmitCheck(Check, CheckName, StaticData, DynamicData); + CGF.EmitCheck(Check, CheckName, StaticData, DynamicData, + CodeGenFunction::CRK_Recoverable); } //===----------------------------------------------------------------------===// @@ -990,7 +1017,12 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { // integer value. 
Value *Base = Visit(E->getBase()); Value *Idx = Visit(E->getIdx()); - bool IdxSigned = E->getIdx()->getType()->isSignedIntegerOrEnumerationType(); + QualType IdxTy = E->getIdx()->getType(); + + if (CGF.SanOpts->Bounds) + CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true); + + bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast"); return Builder.CreateExtractElement(Base, Idx, "vecext"); } @@ -1224,7 +1256,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); - return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl, + llvm::Value *V = Visit(E); + + // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is + // performed and the object is not of the derived type. + if (CGF.SanitizePerformTypeCheck) + CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), + V, DestTy->getPointeeType()); + + return CGF.GetAddressOfDerivedClass(V, DerivedClassDecl, CE->path_begin(), CE->path_end(), ShouldNullCheckClassCastValue(CE)); } @@ -1352,17 +1392,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { Elt = EmitScalarConversion(Elt, E->getType(), DestTy->getAs<VectorType>()->getElementType()); - // Insert the element in element zero of an undef vector - llvm::Value *UnV = llvm::UndefValue::get(DstTy); - llvm::Value *Idx = Builder.getInt32(0); - UnV = Builder.CreateInsertElement(UnV, Elt, Idx); - // Splat the element across to all elements unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements(); - llvm::Constant *Zero = Builder.getInt32(0); - llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements, Zero); - llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); - return Yay; + return Builder.CreateVectorSplat(NumElements, Elt, "splat");; } case CK_IntegralCast: @@ -1394,6 +1426,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { return EmitComplexToScalarConversion(V, E->getType(), DestTy); } + case CK_ZeroToOCLEvent: { + assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non event type"); + return llvm::Constant::getNullValue(ConvertType(DestTy)); + } + } llvm_unreachable("unknown scalar cast"); @@ -1417,7 +1454,7 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E, case LangOptions::SOB_Defined: return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec"); case LangOptions::SOB_Undefined: - if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow) + if (!CGF.SanOpts->SignedIntegerOverflow) return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec"); // Fall through. case LangOptions::SOB_Trapping: @@ -1438,21 +1475,60 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre) { QualType type = E->getSubExpr()->getType(); - llvm::Value *value = EmitLoadOfLValue(LV); - llvm::Value *input = value; llvm::PHINode *atomicPHI = 0; + llvm::Value *value; + llvm::Value *input; int amount = (isInc ? 
1 : -1); if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { + type = atomicTy->getValueType(); + if (isInc && type->isBooleanType()) { + llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); + if (isPre) { + Builder.Insert(new llvm::StoreInst(True, + LV.getAddress(), LV.isVolatileQualified(), + LV.getAlignment().getQuantity(), + llvm::SequentiallyConsistent)); + return Builder.getTrue(); + } + // For atomic bool increment, we just store true and return it for + // preincrement, do an atomic swap with true for postincrement + return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, + LV.getAddress(), True, llvm::SequentiallyConsistent); + } + // Special case for atomic increment / decrement on integers, emit + // atomicrmw instructions. We skip this if we want to be doing overflow + // checking, and fall into the slow path with the atomic cmpxchg loop. + if (!type->isBooleanType() && type->isIntegerType() && + !(type->isUnsignedIntegerType() && + CGF.SanOpts->UnsignedIntegerOverflow) && + CGF.getLangOpts().getSignedOverflowBehavior() != + LangOptions::SOB_Trapping) { + llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : + llvm::AtomicRMWInst::Sub; + llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : + llvm::Instruction::Sub; + llvm::Value *amt = CGF.EmitToMemory( + llvm::ConstantInt::get(ConvertType(type), 1, true), type); + llvm::Value *old = Builder.CreateAtomicRMW(aop, + LV.getAddress(), amt, llvm::SequentiallyConsistent); + return isPre ? Builder.CreateBinOp(op, old, amt) : old; + } + value = EmitLoadOfLValue(LV); + input = value; + // For every other atomic operation, we need to emit a load-op-cmpxchg loop llvm::BasicBlock *startBB = Builder.GetInsertBlock(); llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); + value = CGF.EmitToMemory(value, type); Builder.CreateBr(opBB); Builder.SetInsertPoint(opBB); atomicPHI = Builder.CreatePHI(value->getType(), 2); atomicPHI->addIncoming(value, startBB); - type = atomicTy->getValueType(); value = atomicPHI; + } else { + value = EmitLoadOfLValue(LV); + input = value; } // Special case of integer increment that we have to check first: bool++. @@ -1472,11 +1548,22 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, // Note that signed integer inc/dec with width less than int can't // overflow because of promotion rules; we're just eliding a few steps here. - if (type->isSignedIntegerOrEnumerationType() && - value->getType()->getPrimitiveSizeInBits() >= - CGF.IntTy->getBitWidth()) + if (value->getType()->getPrimitiveSizeInBits() >= + CGF.IntTy->getBitWidth() && + type->isSignedIntegerOrEnumerationType()) { value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc); - else + } else if (value->getType()->getPrimitiveSizeInBits() >= + CGF.IntTy->getBitWidth() && type->isUnsignedIntegerType() && + CGF.SanOpts->UnsignedIntegerOverflow) { + BinOpInfo BinOp; + BinOp.LHS = value; + BinOp.RHS = llvm::ConstantInt::get(value->getType(), 1, false); + BinOp.Ty = E->getType(); + BinOp.Opcode = isInc ? BO_Add : BO_Sub; + BinOp.FPContractable = false; + BinOp.E = E; + value = EmitOverflowCheckedBinOp(BinOp); + } else value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); // Next most common: pointer increment. @@ -1531,7 +1618,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, // Add the inc/dec to the real part. 
llvm::Value *amt; - if (type->isHalfType()) { + if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { // Another special case: half FP increment should be done via float value = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), @@ -1553,7 +1640,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, } value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec"); - if (type->isHalfType()) + if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) value = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16), value); @@ -1579,7 +1666,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, llvm::BasicBlock *opBB = Builder.GetInsertBlock(); llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI, - value, llvm::SequentiallyConsistent); + CGF.EmitToMemory(value, type), llvm::SequentiallyConsistent); atomicPHI->addIncoming(old, opBB); llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI); Builder.CreateCondBr(success, contBB, opBB); @@ -1624,12 +1711,15 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { } Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { - // Perform vector logical not on comparison with zero vector. if (E->getType()->isExtVectorType()) { Value *Oper = Visit(E->getSubExpr()); Value *Zero = llvm::Constant::getNullValue(Oper->getType()); - Value *Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); + Value *Result; + if (Oper->getType()->isFPOrFPVectorTy()) + Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); + else + Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); } @@ -1852,20 +1942,63 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( OpInfo.E = E; // Load/convert the LHS. 
LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); - OpInfo.LHS = EmitLoadOfLValue(LHSLV); llvm::PHINode *atomicPHI = 0; - if (LHSTy->isAtomicType()) { + if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) { + QualType type = atomicTy->getValueType(); + if (!type->isBooleanType() && type->isIntegerType() && + !(type->isUnsignedIntegerType() && + CGF.SanOpts->UnsignedIntegerOverflow) && + CGF.getLangOpts().getSignedOverflowBehavior() != + LangOptions::SOB_Trapping) { + llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP; + switch (OpInfo.Opcode) { + // We don't have atomicrmw operands for *, %, /, <<, >> + case BO_MulAssign: case BO_DivAssign: + case BO_RemAssign: + case BO_ShlAssign: + case BO_ShrAssign: + break; + case BO_AddAssign: + aop = llvm::AtomicRMWInst::Add; + break; + case BO_SubAssign: + aop = llvm::AtomicRMWInst::Sub; + break; + case BO_AndAssign: + aop = llvm::AtomicRMWInst::And; + break; + case BO_XorAssign: + aop = llvm::AtomicRMWInst::Xor; + break; + case BO_OrAssign: + aop = llvm::AtomicRMWInst::Or; + break; + default: + llvm_unreachable("Invalid compound assignment type"); + } + if (aop != llvm::AtomicRMWInst::BAD_BINOP) { + llvm::Value *amt = CGF.EmitToMemory(EmitScalarConversion(OpInfo.RHS, + E->getRHS()->getType(), LHSTy), LHSTy); + Builder.CreateAtomicRMW(aop, LHSLV.getAddress(), amt, + llvm::SequentiallyConsistent); + return LHSLV; + } + } // FIXME: For floating point types, we should be saving and restoring the // floating point environment in the loop. llvm::BasicBlock *startBB = Builder.GetInsertBlock(); llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); + OpInfo.LHS = EmitLoadOfLValue(LHSLV); + OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); Builder.CreateBr(opBB); Builder.SetInsertPoint(opBB); atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); atomicPHI->addIncoming(OpInfo.LHS, startBB); OpInfo.LHS = atomicPHI; } + else + OpInfo.LHS = EmitLoadOfLValue(LHSLV); OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType()); @@ -1880,7 +2013,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( llvm::BasicBlock *opBB = Builder.GetInsertBlock(); llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI, - Result, llvm::SequentiallyConsistent); + CGF.EmitToMemory(Result, LHSTy), llvm::SequentiallyConsistent); atomicPHI->addIncoming(old, opBB); llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI); Builder.CreateCondBr(success, contBB, opBB); @@ -1926,10 +2059,10 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { llvm::Value *Cond = 0; - if (CGF.getLangOpts().SanitizeDivideByZero) + if (CGF.SanOpts->IntegerDivideByZero) Cond = Builder.CreateICmpNE(Ops.RHS, Zero); - if (CGF.getLangOpts().SanitizeSignedIntegerOverflow && + if (CGF.SanOpts->SignedIntegerOverflow && Ops.Ty->hasSignedIntegerRepresentation()) { llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); @@ -1948,16 +2081,17 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( } Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { - if (CGF.getLangOpts().SanitizeDivideByZero || - CGF.getLangOpts().SanitizeSignedIntegerOverflow) { + if ((CGF.SanOpts->IntegerDivideByZero || + CGF.SanOpts->SignedIntegerOverflow) && + Ops.Ty->isIntegerType()) { llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); - - if 
(Ops.Ty->isIntegerType()) - EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); - else if (CGF.getLangOpts().SanitizeDivideByZero && - Ops.Ty->isRealFloatingType()) - EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops); + EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); + } else if (CGF.SanOpts->FloatDivideByZero && + Ops.Ty->isRealFloatingType()) { + llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); + EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops); } + if (Ops.LHS->getType()->isFPOrFPVectorTy()) { llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); if (CGF.getLangOpts().OpenCL) { @@ -1978,10 +2112,10 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { // Rem in C can't be a floating point type: C99 6.5.5p2. - if (CGF.getLangOpts().SanitizeDivideByZero) { + if (CGF.SanOpts->IntegerDivideByZero) { llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); - if (Ops.Ty->isIntegerType()) + if (Ops.Ty->isIntegerType()) EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); } @@ -1995,27 +2129,32 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { unsigned IID; unsigned OpID = 0; + bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); switch (Ops.Opcode) { case BO_Add: case BO_AddAssign: OpID = 1; - IID = llvm::Intrinsic::sadd_with_overflow; + IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : + llvm::Intrinsic::uadd_with_overflow; break; case BO_Sub: case BO_SubAssign: OpID = 2; - IID = llvm::Intrinsic::ssub_with_overflow; + IID = isSigned ? llvm::Intrinsic::ssub_with_overflow : + llvm::Intrinsic::usub_with_overflow; break; case BO_Mul: case BO_MulAssign: OpID = 3; - IID = llvm::Intrinsic::smul_with_overflow; + IID = isSigned ? llvm::Intrinsic::smul_with_overflow : + llvm::Intrinsic::umul_with_overflow; break; default: llvm_unreachable("Unsupported operation for overflow detection"); } OpID <<= 1; - OpID |= 1; + if (isSigned) + OpID |= 1; llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); @@ -2031,10 +2170,10 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { if (handlerName->empty()) { // If the signed-integer-overflow sanitizer is enabled, emit a call to its // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. - if (CGF.getLangOpts().SanitizeSignedIntegerOverflow) + if (!isSigned || CGF.SanOpts->SignedIntegerOverflow) EmitBinOpCheck(Builder.CreateNot(overflow), Ops); else - CGF.EmitTrapvCheck(Builder.CreateNot(overflow)); + CGF.EmitTrapCheck(Builder.CreateNot(overflow)); return result; } @@ -2065,9 +2204,14 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { // Call the handler with the two arguments, the operation, and the size of // the result. - llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs, - Builder.getInt8(OpID), - Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())); + llvm::Value *handlerArgs[] = { + lhs, + rhs, + Builder.getInt8(OpID), + Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) + }; + llvm::Value *handlerResult = + CGF.EmitNounwindRuntimeCall(handler, handlerArgs); // Truncate the result back to the desired size. 
handlerResult = Builder.CreateTrunc(handlerResult, opTy); @@ -2113,6 +2257,10 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF, if (isSubtraction) index = CGF.Builder.CreateNeg(index, "idx.neg"); + if (CGF.SanOpts->Bounds) + CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), + /*Accessed*/ false); + const PointerType *pointerType = pointerOperand->getType()->getAs<PointerType>(); if (!pointerType) { @@ -2217,7 +2365,7 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op, // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is // either disabled, or handled entirely by the LLVM backend). - if (CGF.getLangOpts().getFPContractMode() != LangOptions::FPC_On) + if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On) return 0; // We have a potentially fusable op. Look for a mul on one of the operands. @@ -2249,14 +2397,17 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { case LangOptions::SOB_Defined: return Builder.CreateAdd(op.LHS, op.RHS, "add"); case LangOptions::SOB_Undefined: - if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow) + if (!CGF.SanOpts->SignedIntegerOverflow) return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); // Fall through. case LangOptions::SOB_Trapping: return EmitOverflowCheckedBinOp(op); } } - + + if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow) + return EmitOverflowCheckedBinOp(op); + if (op.LHS->getType()->isFPOrFPVectorTy()) { // Try to form an fmuladd. if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) @@ -2276,14 +2427,17 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { case LangOptions::SOB_Defined: return Builder.CreateSub(op.LHS, op.RHS, "sub"); case LangOptions::SOB_Undefined: - if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow) + if (!CGF.SanOpts->SignedIntegerOverflow) return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); // Fall through. case LangOptions::SOB_Trapping: return EmitOverflowCheckedBinOp(op); } } - + + if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow) + return EmitOverflowCheckedBinOp(op); + if (op.LHS->getType()->isFPOrFPVectorTy()) { // Try to form an fmuladd. if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) @@ -2352,6 +2506,15 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); } +Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { + llvm::IntegerType *Ty; + if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) + Ty = cast<llvm::IntegerType>(VT->getElementType()); + else + Ty = cast<llvm::IntegerType>(LHS->getType()); + return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); +} + Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { // LLVM requires the LHS and RHS to be the same type: promote or truncate the // RHS to the same size as the LHS. @@ -2359,18 +2522,20 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); - if (CGF.getLangOpts().SanitizeShift && + if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL && isa<llvm::IntegerType>(Ops.LHS->getType())) { - unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth(); - llvm::Value *WidthMinusOne = - llvm::ConstantInt::get(RHS->getType(), Width - 1); - // FIXME: Emit the branching explicitly rather than emitting the check - // twice. 
- EmitBinOpCheck(Builder.CreateICmpULE(RHS, WidthMinusOne), Ops); + llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS); + llvm::Value *Valid = Builder.CreateICmpULE(RHS, WidthMinusOne); if (Ops.Ty->hasSignedIntegerRepresentation()) { + llvm::BasicBlock *Orig = Builder.GetInsertBlock(); + llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); + llvm::BasicBlock *CheckBitsShifted = CGF.createBasicBlock("check"); + Builder.CreateCondBr(Valid, CheckBitsShifted, Cont); + // Check whether we are shifting any non-zero bits off the top of the // integer. + CGF.EmitBlock(CheckBitsShifted); llvm::Value *BitsShiftedOff = Builder.CreateLShr(Ops.LHS, Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros", @@ -2385,9 +2550,19 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); } llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); - EmitBinOpCheck(Builder.CreateICmpEQ(BitsShiftedOff, Zero), Ops); + llvm::Value *SecondCheck = Builder.CreateICmpEQ(BitsShiftedOff, Zero); + CGF.EmitBlock(Cont); + llvm::PHINode *P = Builder.CreatePHI(Valid->getType(), 2); + P->addIncoming(Valid, Orig); + P->addIncoming(SecondCheck, CheckBitsShifted); + Valid = P; } + + EmitBinOpCheck(Valid, Ops); } + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (CGF.getLangOpts().OpenCL) + RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask"); return Builder.CreateShl(Ops.LHS, RHS, "shl"); } @@ -2399,12 +2574,13 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); - if (CGF.getLangOpts().SanitizeShift && - isa<llvm::IntegerType>(Ops.LHS->getType())) { - unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth(); - llvm::Value *WidthVal = llvm::ConstantInt::get(RHS->getType(), Width); - EmitBinOpCheck(Builder.CreateICmpULT(RHS, WidthVal), Ops); - } + if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL && + isa<llvm::IntegerType>(Ops.LHS->getType())) + EmitBinOpCheck(Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)), Ops); + + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (CGF.getLangOpts().OpenCL) + RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask"); if (Ops.Ty->hasUnsignedIntegerRepresentation()) return Builder.CreateLShr(Ops.LHS, RHS, "shr"); @@ -2633,16 +2809,20 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { } Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { - // Perform vector logical and on comparisons with zero vectors. 
if (E->getType()->isVectorType()) { Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); - LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); - RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); + if (LHS->getType()->isFPOrFPVectorTy()) { + LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); + RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); + } else { + LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); + RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); + } Value *And = Builder.CreateAnd(LHS, RHS); - return Builder.CreateSExt(And, Zero->getType(), "sext"); + return Builder.CreateSExt(And, ConvertType(E->getType()), "sext"); } llvm::Type *ResTy = ConvertType(E->getType()); @@ -2700,16 +2880,20 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { } Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { - // Perform vector logical or on comparisons with zero vectors. if (E->getType()->isVectorType()) { Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); - LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); - RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); + if (LHS->getType()->isFPOrFPVectorTy()) { + LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); + RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); + } else { + LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); + RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); + } Value *Or = Builder.CreateOr(LHS, RHS); - return Builder.CreateSExt(Or, Zero->getType(), "sext"); + return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext"); } llvm::Type *ResTy = ConvertType(E->getType()); @@ -3007,7 +3191,7 @@ Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) { /// EmitScalarExpr - Emit the computation of the specified expression of scalar /// type, ignoring the result. Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { - assert(E && !hasAggregateLLVMType(E->getType()) && + assert(E && hasScalarEvaluationKind(E->getType()) && "Invalid scalar expression to emit"); if (isa<CXXDefaultArgExpr>(E)) @@ -3023,7 +3207,7 @@ Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { /// specified destination type, both of which are LLVM scalar types. Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy) { - assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) && + assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && "Invalid scalar expression to emit"); return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy); } @@ -3034,7 +3218,7 @@ Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy) { - assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) && + assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && "Invalid complex -> scalar conversion"); return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy, DstTy); |
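
Restated outside diff form, the splat cleanup that appears twice above (in EmitScalarConversion and in the CK_VectorSplat cast): the manual insertelement-into-undef plus zero-mask shufflevector broadcast collapses into IRBuilder's CreateVectorSplat, which emits essentially the same instruction pair. A minimal sketch assuming the LLVM 3.3-era IRBuilder API; the two free functions are illustrative names, not code from this tree.

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"

    // Pattern removed by the import: put Elt into lane 0 of an undef vector,
    // then broadcast it with an all-zero shuffle mask.
    llvm::Value *splatByHand(llvm::IRBuilder<> &B, llvm::VectorType *VTy,
                             llvm::Value *Elt) {
      llvm::Value *V = llvm::UndefValue::get(VTy);
      V = B.CreateInsertElement(V, Elt, B.getInt32(0));
      llvm::Constant *Mask =
          llvm::ConstantVector::getSplat(VTy->getNumElements(), B.getInt32(0));
      return B.CreateShuffleVector(V, V, Mask, "splat");
    }

    // Form used after the import: one IRBuilder helper call.
    llvm::Value *splatWithHelper(llvm::IRBuilder<> &B, unsigned NumElements,
                                 llvm::Value *Elt) {
      return B.CreateVectorSplat(NumElements, Elt, "splat");
    }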