Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp  482
1 file changed, 375 insertions(+), 107 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 1cccafe..b429b1d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -28,7 +28,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/CFG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <cstdarg>
using namespace clang;
@@ -45,6 +45,7 @@ struct BinOpInfo {
Value *RHS;
QualType Ty; // Computation Type.
BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+ bool FPContractable;
const Expr *E; // Entire expr, for error unsupported. May not be binop.
};
@@ -80,7 +81,11 @@ public:
llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
- LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
+ return CGF.EmitCheckedLValue(E, TCK);
+ }
+
+ void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
Value *EmitLoadOfLValue(LValue LV) {
return CGF.EmitLoadOfLValue(LV).getScalarVal();
@@ -90,13 +95,19 @@ public:
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E));
+ return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load));
}
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType DstTy);
+ /// \brief Emit a check that a conversion to or from a floating-point type
+ /// does not overflow.
+ void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
+ Value *Src, QualType SrcType,
+ QualType DstType, llvm::Type *DstTy);
+
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
@@ -391,34 +402,26 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(Ops);
}
}
-
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
- bool isTrapvOverflowBehavior() {
- return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
- == LangOptions::SOB_Trapping;
- }
/// Create a binary op that checks for overflow.
/// Currently only supports +, - and *.
Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
- // Emit the overflow BB when -ftrapv option is activated.
- void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
- Builder.SetInsertPoint(overflowBB);
- llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
- Builder.CreateCall(Trap);
- Builder.CreateUnreachable();
- }
+
// Check for undefined division and modulus behaviors.
void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
llvm::Value *Zero,bool isDiv);
@@ -537,6 +540,110 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return EmitPointerToBoolConversion(Src);
}
+void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
+ QualType OrigSrcType,
+ Value *Src, QualType SrcType,
+ QualType DstType,
+ llvm::Type *DstTy) {
+ using llvm::APFloat;
+ using llvm::APSInt;
+
+ llvm::Type *SrcTy = Src->getType();
+
+ llvm::Value *Check = 0;
+ if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
+ // Integer to floating-point. This can fail for unsigned short -> __half
+ // or unsigned __int128 -> float.
+ assert(DstType->isFloatingType());
+ bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
+
+ APFloat LargestFloat =
+ APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
+ APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
+
+ bool IsExact;
+ if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
+ &IsExact) != APFloat::opOK)
+ // The range of representable values of this floating point type includes
+ // all values of this integer type. Don't need an overflow check.
+ return;
+
+ llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
+ if (SrcIsUnsigned)
+ Check = Builder.CreateICmpULE(Src, Max);
+ else {
+ llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
+ llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
+ llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
+ Check = Builder.CreateAnd(GE, LE);
+ }
+ } else {
+ // Floating-point to integer or floating-point to floating-point. This has
+ // undefined behavior if the source is +-Inf, NaN, or doesn't fit into the
+ // destination type.
+ const llvm::fltSemantics &SrcSema =
+ CGF.getContext().getFloatTypeSemantics(OrigSrcType);
+ APFloat MaxSrc(SrcSema, APFloat::uninitialized);
+ APFloat MinSrc(SrcSema, APFloat::uninitialized);
+
+ if (isa<llvm::IntegerType>(DstTy)) {
+ unsigned Width = CGF.getContext().getIntWidth(DstType);
+ bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
+
+ APSInt Min = APSInt::getMinValue(Width, Unsigned);
+ if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for lower bound. Just check for
+ // -Inf/NaN.
+ MinSrc = APFloat::getLargest(SrcSema, true);
+
+ APSInt Max = APSInt::getMaxValue(Width, Unsigned);
+ if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
+ APFloat::opOverflow)
+ // Don't need an overflow check for upper bound. Just check for
+ // +Inf/NaN.
+ MaxSrc = APFloat::getLargest(SrcSema, false);
+ } else {
+ const llvm::fltSemantics &DstSema =
+ CGF.getContext().getFloatTypeSemantics(DstType);
+ bool IsInexact;
+
+ MinSrc = APFloat::getLargest(DstSema, true);
+ if (MinSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
+ APFloat::opOverflow)
+ MinSrc = APFloat::getLargest(SrcSema, true);
+
+ MaxSrc = APFloat::getLargest(DstSema, false);
+ if (MaxSrc.convert(SrcSema, APFloat::rmTowardZero, &IsInexact) &
+ APFloat::opOverflow)
+ MaxSrc = APFloat::getLargest(SrcSema, false);
+ }
+
+ // If we're converting from __half, convert the range to float to match
+ // the type of src.
+ if (OrigSrcType->isHalfType()) {
+ const llvm::fltSemantics &Sema =
+ CGF.getContext().getFloatTypeSemantics(SrcType);
+ bool IsInexact;
+ MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
+ }
+
+ llvm::Value *GE =
+ Builder.CreateFCmpOGE(Src, llvm::ConstantFP::get(VMContext, MinSrc));
+ llvm::Value *LE =
+ Builder.CreateFCmpOLE(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
+ Check = Builder.CreateAnd(GE, LE);
+ }
+
+ // FIXME: Provide a SourceLocation.
+ llvm::Constant *StaticArgs[] = {
+ CGF.EmitCheckTypeDescriptor(OrigSrcType),
+ CGF.EmitCheckTypeDescriptor(DstType)
+ };
+ CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc);
+}
+
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
@@ -547,6 +654,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isVoidType()) return 0;
+ llvm::Value *OrigSrc = Src;
+ QualType OrigSrcType = SrcType;
llvm::Type *SrcTy = Src->getType();
// Floating casts might be a bit special: if we're doing casts to / from half
@@ -620,6 +729,12 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
Value *Res = NULL;
llvm::Type *ResTy = DstTy;
+ // An overflowing conversion has undefined behavior if either the source type
+ // or the destination type is a floating-point type.
+ if (CGF.getLangOpts().SanitizeFloatCastOverflow &&
+ (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
+ EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy);
+
// Cast to half via float
if (DstType->isHalfType())
DstTy = CGF.FloatTy;
@@ -686,6 +801,54 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
return llvm::Constant::getNullValue(ConvertType(Ty));
}
+/// \brief Emit a sanitization check for the given "binary" operation (which
+/// might actually be a unary increment which has been lowered to a binary
+/// operation). The check passes if \p Check, which is an \c i1, is \c true.
+void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
+ StringRef CheckName;
+ llvm::SmallVector<llvm::Constant *, 4> StaticData;
+ llvm::SmallVector<llvm::Value *, 2> DynamicData;
+
+ BinaryOperatorKind Opcode = Info.Opcode;
+ if (BinaryOperator::isCompoundAssignmentOp(Opcode))
+ Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
+
+ StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
+ if (UO && UO->getOpcode() == UO_Minus) {
+ CheckName = "negate_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
+ DynamicData.push_back(Info.RHS);
+ } else {
+ if (BinaryOperator::isShiftOp(Opcode)) {
+ // Shift LHS negative or too large, or RHS out of bounds.
+ CheckName = "shift_out_of_bounds";
+ const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
+ StaticData.push_back(
+ CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+ // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
+ CheckName = "divrem_overflow";
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType()));
+ } else {
+ // Signed arithmetic overflow (+, -, *).
+ switch (Opcode) {
+ case BO_Add: CheckName = "add_overflow"; break;
+ case BO_Sub: CheckName = "sub_overflow"; break;
+ case BO_Mul: CheckName = "mul_overflow"; break;
+ default: llvm_unreachable("unexpected opcode for bin op check");
+ }
+ StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.E->getType()));
+ }
+ DynamicData.push_back(Info.LHS);
+ DynamicData.push_back(Info.RHS);
+ }
+
+ CGF.EmitCheck(Check, CheckName, StaticData, DynamicData);
+}
+
//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//
@@ -802,7 +965,8 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
// debug info size.
CGDebugInfo *DI = CGF.getDebugInfo();
if (DI &&
- CGF.CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
+ CGF.CGM.getCodeGenOpts().getDebugInfo()
+ == CodeGenOptions::LimitedDebugInfo) {
QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
@@ -1032,7 +1196,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// are in the same order as in the CastKind enum.
switch (Kind) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
-
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
@@ -1055,19 +1221,18 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Visit(const_cast<Expr*>(E));
case CK_BaseToDerived: {
- const CXXRecordDecl *DerivedClassDecl =
- DestTy->getCXXRecordDeclForPointerType();
-
- return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE));
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const RecordType *DerivedClassTy =
- E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
- CXXRecordDecl *DerivedClassDecl =
- cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ const CXXRecordDecl *DerivedClassDecl =
+ E->getType()->getPointeeCXXRecordDecl();
+ assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
@@ -1248,17 +1413,20 @@ llvm::Value *ScalarExprEmitter::
EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
llvm::Value *InVal,
llvm::Value *NextVal, bool IsInc) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ // Fall through.
case LangOptions::SOB_Trapping:
BinOpInfo BinOp;
BinOp.LHS = InVal;
BinOp.RHS = NextVal;
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Add;
+ BinOp.FPContractable = false;
BinOp.E = E;
return EmitOverflowCheckedBinOp(BinOp);
}
@@ -1300,7 +1468,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Most common case by far: integer increment.
} else if (type->isIntegerType()) {
- llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps here.
@@ -1320,7 +1488,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).first;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
@@ -1330,7 +1498,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = Builder.getInt32(amount);
value = CGF.EmitCastToVoidPtr(value);
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
@@ -1339,7 +1507,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
@@ -1400,7 +1568,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *sizeValue =
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
- if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
@@ -1444,6 +1612,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
+ BinOp.FPContractable = false;
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -1652,6 +1821,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
+ Result.FPContractable = E->isFPContractable();
Result.E = E;
return Result;
}
@@ -1678,9 +1848,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
+ OpInfo.FPContractable = false;
OpInfo.E = E;
// Load/convert the LHS.
- LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
llvm::PHINode *atomicPHI = 0;
@@ -1740,7 +1911,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1752,56 +1923,44 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
}
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
- const BinOpInfo &Ops,
- llvm::Value *Zero, bool isDiv) {
- llvm::Function::iterator insertPt = Builder.GetInsertBlock();
- llvm::BasicBlock *contBB =
- CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
- llvm::next(insertPt));
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
+ llvm::Value *Cond = 0;
+
+ if (CGF.getLangOpts().SanitizeDivideByZero)
+ Cond = Builder.CreateICmpNE(Ops.RHS, Zero);
- llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+ if (CGF.getLangOpts().SanitizeSignedIntegerOverflow &&
+ Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
- if (Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::Value *IntMin =
Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
- llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
- llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
- llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
- llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
- Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
- overflowBB, contBB);
- } else {
- CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
- overflowBB, contBB);
+ llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
+ llvm::Value *Overflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Cond = Cond ? Builder.CreateAnd(Cond, Overflow, "and") : Overflow;
}
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(contBB);
+
+ if (Cond)
+ EmitBinOpCheck(Cond, Ops);
}
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
- if (isTrapvOverflowBehavior()) {
+ if (CGF.getLangOpts().SanitizeDivideByZero ||
+ CGF.getLangOpts().SanitizeSignedIntegerOverflow) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
if (Ops.Ty->isIntegerType())
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
- else if (Ops.Ty->isRealFloatingType()) {
- llvm::Function::iterator insertPt = Builder.GetInsertBlock();
- llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
- llvm::next(insertPt));
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
- CGF.CurFn);
- CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
- overflowBB, DivCont);
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(DivCont);
- }
+ else if (CGF.getLangOpts().SanitizeDivideByZero &&
+ Ops.Ty->isRealFloatingType())
+ EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops);
}
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if (CGF.getContext().getLangOpts().OpenCL) {
+ if (CGF.getLangOpts().OpenCL) {
// OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
llvm::Type *ValTy = Val->getType();
if (ValTy->isFloatTy() ||
@@ -1819,7 +1978,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (isTrapvOverflowBehavior()) {
+ if (CGF.getLangOpts().SanitizeDivideByZero) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
if (Ops.Ty->isIntegerType())
@@ -1866,6 +2025,19 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+ // Handle overflow with llvm.trap if no custom handler has been specified.
+ const std::string *handlerName =
+ &CGF.getLangOpts().OverflowHandler;
+ if (handlerName->empty()) {
+ // If the signed-integer-overflow sanitizer is enabled, emit a call to its
+ // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
+ if (CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ EmitBinOpCheck(Builder.CreateNot(overflow), Ops);
+ else
+ CGF.EmitTrapvCheck(Builder.CreateNot(overflow));
+ return result;
+ }
+
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
llvm::Function::iterator insertPt = initialBB;
@@ -1875,15 +2047,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.CreateCondBr(overflow, overflowBB, continueBB);
- // Handle overflow with llvm.trap.
- const std::string *handlerName =
- &CGF.getContext().getLangOpts().OverflowHandler;
- if (handlerName->empty()) {
- EmitOverflowBB(overflowBB);
- Builder.SetInsertPoint(continueBB);
- return result;
- }
-
// If an overflow handler is set, then we want to call it and then use its
// result, if it returns.
Builder.SetInsertPoint(overflowBB);
@@ -2001,24 +2164,106 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
}
+// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
+// Addend. Use negMul and negAdd to negate the first operand of the Mul or
+// the add operand respectively. This allows fmuladd to represent a*b-c, or
+// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
+// efficient operations.
+static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool negMul, bool negAdd) {
+ assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
+
+ Value *MulOp0 = MulOp->getOperand(0);
+ Value *MulOp1 = MulOp->getOperand(1);
+ if (negMul) {
+ MulOp0 =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
+ "neg");
+ } else if (negAdd) {
+ Addend =
+ Builder.CreateFSub(
+ llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
+ "neg");
+ }
+
+ Value *FMulAdd =
+ Builder.CreateCall3(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
+ MulOp0, MulOp1, Addend);
+ MulOp->eraseFromParent();
+
+ return FMulAdd;
+}
+
+// Check whether it would be legal to emit an fmuladd intrinsic call to
+// represent op and if so, build the fmuladd.
+//
+// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
+// Does NOT check the type of the operation - it's assumed that this function
+// will be called from contexts where it's known that the type is contractable.
+static Value* tryEmitFMulAdd(const BinOpInfo &op,
+ const CodeGenFunction &CGF, CGBuilderTy &Builder,
+ bool isSub=false) {
+
+ assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
+ op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
+ "Only fadd/fsub can be the root of an fmuladd.");
+
+ // Check whether this op is marked as fusable.
+ if (!op.FPContractable)
+ return 0;
+
+ // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
+ // either disabled, or handled entirely by the LLVM backend).
+ if (CGF.getLangOpts().getFPContractMode() != LangOptions::FPC_On)
+ return 0;
+
+ // We have a potentially fusable op. Look for a mul on one of the operands.
+ if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) {
+ assert(LHSBinOp->getNumUses() == 0 &&
+ "Operations with multiple uses shouldn't be contracted.");
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ }
+ } else if (llvm::BinaryOperator* RHSBinOp =
+ dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) {
+ assert(RHSBinOp->getNumUses() == 0 &&
+ "Operations with multiple uses shouldn't be contracted.");
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ }
+ }
+
+ return 0;
+}
+
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.LHS->getType()->isPointerTy() ||
op.RHS->getType()->isPointerTy())
return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(op);
}
}
- if (op.LHS->getType()->isFPOrFPVectorTy())
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
+ return FMulAdd;
+
return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+ }
return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
@@ -2027,18 +2272,24 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// The LHS is always a pointer if either side is.
if (!op.LHS->getType()->isPointerTy()) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Defined:
return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Undefined:
+ if (!CGF.getLangOpts().SanitizeSignedIntegerOverflow)
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ // Fall through.
case LangOptions::SOB_Trapping:
return EmitOverflowCheckedBinOp(op);
}
}
- if (op.LHS->getType()->isFPOrFPVectorTy())
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
+ return FMulAdd;
return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+ }
return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
@@ -2108,14 +2359,34 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.CatchUndefined
- && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ if (CGF.getLangOpts().SanitizeShift &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
- llvm::ConstantInt::get(RHS->getType(), Width)),
- Cont, CGF.getTrapBB());
- CGF.EmitBlock(Cont);
+ llvm::Value *WidthMinusOne =
+ llvm::ConstantInt::get(RHS->getType(), Width - 1);
+ // FIXME: Emit the branching explicitly rather than emitting the check
+ // twice.
+ EmitBinOpCheck(Builder.CreateICmpULE(RHS, WidthMinusOne), Ops);
+
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ // Check whether we are shifting any non-zero bits off the top of the
+ // integer.
+ llvm::Value *BitsShiftedOff =
+ Builder.CreateLShr(Ops.LHS,
+ Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
+ /*NUW*/true, /*NSW*/true),
+ "shl.check");
+ if (CGF.getLangOpts().CPlusPlus) {
+ // In C99, we are not permitted to shift a 1 bit into the sign bit.
+ // Under C++11's rules, shifting a 1 bit into the sign bit is
+ // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
+ // define signed left shifts, so we use the C99 and C++11 rules there).
+ llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
+ BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
+ }
+ llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
+ EmitBinOpCheck(Builder.CreateICmpEQ(BitsShiftedOff, Zero), Ops);
+ }
}
return Builder.CreateShl(Ops.LHS, RHS, "shl");
@@ -2128,14 +2399,11 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.CatchUndefined
- && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ if (CGF.getLangOpts().SanitizeShift &&
+ isa<llvm::IntegerType>(Ops.LHS->getType())) {
unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
- llvm::ConstantInt::get(RHS->getType(), Width)),
- Cont, CGF.getTrapBB());
- CGF.EmitBlock(Cont);
+ llvm::Value *WidthVal = llvm::ConstantInt::get(RHS->getType(), Width);
+ EmitBinOpCheck(Builder.CreateICmpULT(RHS, WidthVal), Ops);
}
if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -2326,7 +2594,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
case Qualifiers::OCL_Weak:
RHS = Visit(E->getRHS());
- LHS = EmitCheckedLValue(E->getLHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
break;
@@ -2336,7 +2604,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// __block variables need to have the rhs evaluated first, plus
// this should improve codegen just a little.
RHS = Visit(E->getRHS());
- LHS = EmitCheckedLValue(E->getLHS());
+ LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
// Store the value into the LHS. Bit-fields are handled specially
// because the result is altered by the store, i.e., [C99 6.5.16p1]
@@ -2353,7 +2621,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOpts().CPlusPlus)
+ if (!CGF.getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -2567,7 +2835,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getContext().getLangOpts().OpenCL
+ if (CGF.getLangOpts().OpenCL
&& condExpr->getType()->isVectorType()) {
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
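
For reference, the EmitCheck-based paths introduced above all correspond to undefined behavior that is easy to reproduce from user code, and the tryEmitFMulAdd path corresponds to compiling with -ffp-contract=on. The sketch below is illustrative only: the -fsanitize= group spellings are an assumption about the driver interface at this revision (early versions of these checks were reached through -fcatch-undefined-behavior), while the check names in the comments are taken directly from the strings in the diff (float_cast_overflow, shift_out_of_bounds, divrem_overflow, add_overflow).

// ubsan-demo.cpp -- each statement trips one of the checks added in this change
// when the matching sanitizer is enabled, e.g. (flag spellings assumed):
//   clang++ -ffp-contract=on \
//     -fsanitize=float-cast-overflow,shift,signed-integer-overflow,integer-divide-by-zero \
//     ubsan-demo.cpp
#include <climits>

// With -ffp-contract=on, EmitAdd/EmitSub call tryEmitFMulAdd and this
// expression is emitted as a single llvm.fmuladd intrinsic instead of fmul + fadd.
double fma_demo(double a, double b, double c) { return a * b + c; }

int main(int argc, char **) {
  volatile double big = 1e100;
  volatile int i = (int)big;            // float_cast_overflow: 1e100 does not fit in int
  volatile int s = 1 << (31 + argc);    // shift_out_of_bounds: shift amount >= bit width
  volatile int q = INT_MIN / -argc;     // divrem_overflow: INT_MIN / -1 overflows
  volatile int m = INT_MAX + argc;      // add_overflow: signed addition overflows
  return i + s + q + m;
}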