Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp')
-rw-r--r-- | contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp | 479
1 file changed, 419 insertions(+), 60 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 1b85c45..1170b01 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "CodeGenFunction.h"
+#include "CGCleanup.h"
 #include "CGCXXABI.h"
 #include "CGDebugInfo.h"
 #include "CGObjCRuntime.h"
@@ -24,10 +25,12 @@
 #include "clang/AST/StmtVisitor.h"
 #include "clang/Basic/TargetInfo.h"
 #include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/IR/CFG.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
@@ -42,13 +45,85 @@ using llvm::Value;
 //===----------------------------------------------------------------------===//
 
 namespace {
+
+/// Determine whether the given binary operation may overflow.
+/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
+/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
+/// the returned overflow check is precise. The returned value is 'true' for
+/// all other opcodes, to be conservative.
+bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
+                            BinaryOperator::Opcode Opcode, bool Signed,
+                            llvm::APInt &Result) {
+  // Assume overflow is possible, unless we can prove otherwise.
+  bool Overflow = true;
+  const auto &LHSAP = LHS->getValue();
+  const auto &RHSAP = RHS->getValue();
+  if (Opcode == BO_Add) {
+    if (Signed)
+      Result = LHSAP.sadd_ov(RHSAP, Overflow);
+    else
+      Result = LHSAP.uadd_ov(RHSAP, Overflow);
+  } else if (Opcode == BO_Sub) {
+    if (Signed)
+      Result = LHSAP.ssub_ov(RHSAP, Overflow);
+    else
+      Result = LHSAP.usub_ov(RHSAP, Overflow);
+  } else if (Opcode == BO_Mul) {
+    if (Signed)
+      Result = LHSAP.smul_ov(RHSAP, Overflow);
+    else
+      Result = LHSAP.umul_ov(RHSAP, Overflow);
+  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+    if (Signed && !RHS->isZero())
+      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
+    else
+      return false;
+  }
+  return Overflow;
+}
+
 struct BinOpInfo {
   Value *LHS;
   Value *RHS;
   QualType Ty;  // Computation Type.
   BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
-  bool FPContractable;
+  FPOptions FPFeatures;
   const Expr *E;      // Entire expr, for error unsupported.  May not be binop.
+
+  /// Check if the binop can result in integer overflow.
+  bool mayHaveIntegerOverflow() const {
+    // Without constant input, we can't rule out overflow.
+    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
+    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
+    if (!LHSCI || !RHSCI)
+      return true;
+
+    llvm::APInt Result;
+    return ::mayHaveIntegerOverflow(
+        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
+  }
+
+  /// Check if the binop computes a division or a remainder.
+  bool isDivremOp() const {
+    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
+           Opcode == BO_RemAssign;
+  }
+
+  /// Check if the binop can result in an integer division by zero.
+  bool mayHaveIntegerDivisionByZero() const {
+    if (isDivremOp())
+      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
+        return CI->isZero();
+    return true;
+  }
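
The helpers above can only prove facts when both operands are compile-time constants; anything else is conservatively treated as "may overflow". A standalone sketch of the same idea, using the GCC/Clang checked-arithmetic builtins in place of llvm::APInt's *_ov methods (the enum and function names here are illustrative, not part of the patch):

    // Mirrors the constant-folding overflow test with compiler builtins.
    #include <cstdio>

    enum Opcode { Add, Sub, Mul };

    // Returns true if 'a op b' overflows a signed int, writing the wrapped
    // result to *out, mirroring APInt::sadd_ov/ssub_ov/smul_ov.
    static bool mayHaveIntegerOverflow(int a, int b, Opcode op, int *out) {
      switch (op) {
      case Add: return __builtin_add_overflow(a, b, out);
      case Sub: return __builtin_sub_overflow(a, b, out);
      case Mul: return __builtin_mul_overflow(a, b, out);
      }
      return true; // Conservative for anything unhandled.
    }

    int main() {
      int r;
      // 2147483647 + 1 overflows: the check cannot be elided.
      std::printf("INT_MAX+1 overflows: %d\n",
                  mayHaveIntegerOverflow(2147483647, 1, Add, &r));
      // 41 + 1 does not: codegen may skip the sanitizer check.
      std::printf("41+1 overflows: %d\n",
                  mayHaveIntegerOverflow(41, 1, Add, &r));
    }
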
+  /// Check if the binop can result in a float division by zero.
+  bool mayHaveFloatDivisionByZero() const {
+    if (isDivremOp())
+      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
+        return CFP->isZero();
+    return true;
+  }
 };
 
 static bool MustVisitNullValue(const Expr *E) {
@@ -58,6 +133,83 @@ static bool MustVisitNullValue(const Expr *E) {
   return E->getType()->isNullPtrType();
 }
 
+/// If \p E is a widened promoted integer, get its base (unpromoted) type.
+static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
+                                                        const Expr *E) {
+  const Expr *Base = E->IgnoreImpCasts();
+  if (E == Base)
+    return llvm::None;
+
+  QualType BaseTy = Base->getType();
+  if (!BaseTy->isPromotableIntegerType() ||
+      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
+    return llvm::None;
+
+  return BaseTy;
+}
+
+/// Check if \p E is a widened promoted integer.
+static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
+  return getUnwidenedIntegerType(Ctx, E).hasValue();
+}
+
+/// Check if we can skip the overflow check for \p Op.
+static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
+  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
+         "Expected a unary or binary operator");
+
+  // If the binop has constant inputs and we can prove there is no overflow,
+  // we can elide the overflow check.
+  if (!Op.mayHaveIntegerOverflow())
+    return true;
+
+  // If a unary op has a widened operand, the op cannot overflow.
+  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
+    return IsWidenedIntegerOp(Ctx, UO->getSubExpr());
+
+  // We usually don't need overflow checks for binops with widened operands.
+  // Multiplication with promoted unsigned operands is a special case.
+  const auto *BO = cast<BinaryOperator>(Op.E);
+  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
+  if (!OptionalLHSTy)
+    return false;
+
+  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
+  if (!OptionalRHSTy)
+    return false;
+
+  QualType LHSTy = *OptionalLHSTy;
+  QualType RHSTy = *OptionalRHSTy;
+
+  // This is the simple case: binops without unsigned multiplication, and with
+  // widened operands. No overflow check is needed here.
+  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
+      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
+    return true;
+
+  // For unsigned multiplication the overflow check can be elided if either one
+  // of the unpromoted types are less than half the size of the promoted type.
+  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
+  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
+         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
+}
+
+/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
+static void updateFastMathFlags(llvm::FastMathFlags &FMF,
+                                FPOptions FPFeatures) {
+  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
+}
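
The elision logic leans on C's integer promotions: operands of promotable types are widened to int before arithmetic, so addition and subtraction on them cannot overflow the promoted type, while unsigned multiplication still can. A standalone sketch of the bounds argument, assuming the usual 16-bit short and 32-bit int:

    #include <climits>
    #include <cstdio>

    int main() {
      // Additive case: the result is bounded by 2*SHRT_MAX, well inside int.
      static_assert(2L * SHRT_MAX <= INT_MAX, "short + short fits in int");

      // Multiplicative case: the bound exceeds INT_MAX, so a check is needed
      // unless one operand type is less than half the promoted width.
      std::printf("USHRT_MAX^2 = %lld > INT_MAX = %d\n",
                  (long long)USHRT_MAX * USHRT_MAX, INT_MAX);

      // unsigned char (8 bits) is less than half of int (32 bits):
      // 0xFF * 0xFF = 0xFE01 always fits, so the check can be elided.
      static_assert((long long)UCHAR_MAX * UCHAR_MAX <= INT_MAX,
                    "uchar * uchar fits in int");
    }
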
+
+/// Propagate fast-math flags from \p Op to the instruction in \p V.
+static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
+  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
+    llvm::FastMathFlags FMF = I->getFastMathFlags();
+    updateFastMathFlags(FMF, Op.FPFeatures);
+    I->setFastMathFlags(FMF);
+  }
+  return V;
+}
+
 class ScalarExprEmitter
   : public StmtVisitor<ScalarExprEmitter, Value*> {
   CodeGenFunction &CGF;
@@ -221,6 +373,15 @@ public:
   Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
     return Visit(GE->getResultExpr());
   }
+  Value *VisitCoawaitExpr(CoawaitExpr *S) {
+    return CGF.EmitCoawaitExpr(*S).getScalarVal();
+  }
+  Value *VisitCoyieldExpr(CoyieldExpr *S) {
+    return CGF.EmitCoyieldExpr(*S).getScalarVal();
+  }
+  Value *VisitUnaryCoawait(const UnaryOperator *E) {
+    return Visit(E->getSubExpr());
+  }
 
   // Leaves.
   Value *VisitIntegerLiteral(const IntegerLiteral *E) {
@@ -300,6 +461,24 @@ public:
     return V;
   }
 
+  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
+    VersionTuple Version = E->getVersion();
+
+    // If we're checking for a platform older than our minimum deployment
+    // target, we can fold the check away.
+    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
+      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
+
+    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
+    llvm::Value *Args[] = {
+        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
+        llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
+        llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
+    };
+
+    return CGF.EmitBuiltinAvailable(Args);
+  }
+
   Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
   Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
   Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
@@ -405,11 +584,7 @@ public:
     return CGF.LoadCXXThis();
   }
 
-  Value *VisitExprWithCleanups(ExprWithCleanups *E) {
-    CGF.enterFullExpression(E);
-    CodeGenFunction::RunCleanupsScope Scope(CGF);
-    return Visit(E->getSubExpr());
-  }
+  Value *VisitExprWithCleanups(ExprWithCleanups *E);
   Value *VisitCXXNewExpr(const CXXNewExpr *E) {
     return CGF.EmitCXXNewExpr(E);
   }
@@ -464,16 +639,21 @@ public:
         return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
       // Fall through.
     case LangOptions::SOB_Trapping:
+      if (CanElideOverflowCheck(CGF.getContext(), Ops))
+        return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
       return EmitOverflowCheckedBinOp(Ops);
     }
   }
 
   if (Ops.Ty->isUnsignedIntegerType() &&
-      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+      !CanElideOverflowCheck(CGF.getContext(), Ops))
     return EmitOverflowCheckedBinOp(Ops);
 
-  if (Ops.LHS->getType()->isFPOrFPVectorTy())
-    return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+    Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+    return propagateFMFlags(V, Ops);
+  }
   return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
 }
 /// Create a binary op that checks for overflow.
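
propagateFMFlags reads the instruction's existing fast-math flags, merges in the contract bit derived from FPOptions, and writes them back. A standalone sketch of the same read-modify-write pattern against the LLVM C++ API (assumes LLVM 5.x headers and libraries; later releases changed some of these signatures, e.g. getOrInsertFunction's return type):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("fmf-demo", Ctx);
      auto *FltTy = llvm::Type::getFloatTy(Ctx);
      auto *FnTy = llvm::FunctionType::get(FltTy, {FltTy, FltTy}, false);
      auto *Fn =
          llvm::cast<llvm::Function>(M.getOrInsertFunction("mulfn", FnTy));
      auto *BB = llvm::BasicBlock::Create(Ctx, "entry", Fn);
      llvm::IRBuilder<> Builder(BB);

      auto AI = Fn->arg_begin();
      llvm::Value *A = &*AI++;
      llvm::Value *B = &*AI;

      // Build the fmul, then stamp the 'contract' flag onto it, the same
      // dance propagateFMFlags performs for each scalar FP binop.
      llvm::Value *V = Builder.CreateFMul(A, B, "mul");
      if (auto *I = llvm::dyn_cast<llvm::Instruction>(V)) {
        llvm::FastMathFlags FMF = I->getFastMathFlags();
        FMF.setAllowContract(true); // mirrors -ffp-contract=fast
        I->setFastMathFlags(FMF);
      }
      Builder.CreateRet(V);
      M.print(llvm::outs(), nullptr); // expect: fmul contract float ...
    }
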
@@ -1414,10 +1594,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     }
     // Since target may map different address spaces in AST to the same address
     // space, an address space conversion may end up as a bitcast.
-    auto *Src = Visit(E);
-    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(CGF, Src,
-                                                               E->getType(),
-                                                               DestTy);
+    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
+        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
+        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
   }
   case CK_AtomicToNonAtomic:
   case CK_NonAtomicToAtomic:
@@ -1616,6 +1795,16 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
                               E->getExprLoc());
 }
 
+Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+  CGF.enterFullExpression(E);
+  CodeGenFunction::RunCleanupsScope Scope(CGF);
+  Value *V = Visit(E->getSubExpr());
+  // Defend against dominance problems caused by jumps out of expression
+  // evaluation through the shared cleanup block.
+  Scope.ForceCleanup({&V});
+  return V;
+}
+
 //===----------------------------------------------------------------------===//
 //                             Unary Operators
 //===----------------------------------------------------------------------===//
 
@@ -1627,7 +1816,7 @@ static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
   BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
   BinOp.Ty = E->getType();
   BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
-  BinOp.FPContractable = false;
+  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
   BinOp.E = E;
   return BinOp;
 }
@@ -1645,6 +1834,8 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
     return Builder.CreateNSWAdd(InVal, Amount, Name);
     // Fall through.
   case LangOptions::SOB_Trapping:
+    if (IsWidenedIntegerOp(CGF.getContext(), E->getSubExpr()))
+      return Builder.CreateNSWAdd(InVal, Amount, Name);
     return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
   }
   llvm_unreachable("Unknown SignedOverflowBehaviorTy");
@@ -1660,6 +1851,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
 
   llvm::Value *input;
   int amount = (isInc ? 1 : -1);
+  bool isSubtraction = !isInc;
 
   if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
     type = atomicTy->getValueType();
@@ -1749,7 +1941,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
       if (CGF.getLangOpts().isSignedOverflowDefined())
         value = Builder.CreateGEP(value, numElts, "vla.inc");
       else
-        value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
+        value = CGF.EmitCheckedInBoundsGEP(
+            value, numElts, /*SignedIndices=*/false, isSubtraction,
+            E->getExprLoc(), "vla.inc");
 
     // Arithmetic on function pointers (!) is just +-1.
     } else if (type->isFunctionType()) {
@@ -1759,7 +1953,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
       if (CGF.getLangOpts().isSignedOverflowDefined())
         value = Builder.CreateGEP(value, amt, "incdec.funcptr");
       else
-        value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
+                                           isSubtraction, E->getExprLoc(),
+                                           "incdec.funcptr");
       value = Builder.CreateBitCast(value, input->getType());
 
     // For everything else, we can just do a simple increment.
@@ -1768,7 +1964,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
       if (CGF.getLangOpts().isSignedOverflowDefined())
         value = Builder.CreateGEP(value, amt, "incdec.ptr");
       else
-        value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
+                                           isSubtraction, E->getExprLoc(),
+                                           "incdec.ptr");
     }
 
   // Vector increment/decrement.
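
Each of these rewritten increment/decrement paths now funnels through EmitCheckedInBoundsGEP, which pairs the inbounds GEP with a pointer-overflow sanitizer check. A standalone sketch of the class of bug it catches; the file name is illustrative, and -fsanitize=pointer-overflow is the sanitizer this patch feeds (available in clang 5.0+):

    // Compile with: clang++ -fsanitize=pointer-overflow ptr_inc.cpp
    #include <cstdio>

    int main() {
      char *p = reinterpret_cast<char *>(16);
      // 'p -= 32' wraps below address 0: with the sanitizer enabled,
      // codegen emits the offset/sign checks described in this patch
      // instead of a bare inbounds GEP, and the runtime reports it.
      p -= 32;
      std::printf("%p\n", static_cast<void *>(p));
    }
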
@@ -1849,7 +2047,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
     if (CGF.getLangOpts().isSignedOverflowDefined())
       value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
     else
-      value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+      value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
+                                         /*SignedIndices=*/false, isSubtraction,
+                                         E->getExprLoc(), "incdec.objptr");
     value = Builder.CreateBitCast(value, input->getType());
   }
 
@@ -1891,7 +2091,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
   BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
   BinOp.Ty = E->getType();
   BinOp.Opcode = BO_Sub;
-  BinOp.FPContractable = false;
+  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
   BinOp.E = E;
   return EmitSub(BinOp);
 }
@@ -2112,7 +2312,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
   Result.RHS = Visit(E->getRHS());
   Result.Ty  = E->getType();
   Result.Opcode = E->getOpcode();
-  Result.FPContractable = E->isFPContractable();
+  Result.FPFeatures = E->getFPFeatures();
   Result.E = E;
   return Result;
 }
@@ -2132,7 +2332,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
   OpInfo.RHS = Visit(E->getRHS());
   OpInfo.Ty = E->getComputationResultType();
   OpInfo.Opcode = E->getOpcode();
-  OpInfo.FPContractable = E->isFPContractable();
+  OpInfo.FPFeatures = E->getFPFeatures();
   OpInfo.E = E;
   // Load/convert the LHS.
   LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
@@ -2263,8 +2463,11 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
                                     SanitizerKind::IntegerDivideByZero));
   }
 
+  const auto *BO = cast<BinaryOperator>(Ops.E);
   if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
-      Ops.Ty->hasSignedIntegerRepresentation()) {
+      Ops.Ty->hasSignedIntegerRepresentation() &&
+      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
+      Ops.mayHaveIntegerOverflow()) {
     llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
 
     llvm::Value *IntMin =
@@ -2287,11 +2490,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
     CodeGenFunction::SanitizerScope SanScope(&CGF);
     if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
          CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
-        Ops.Ty->isIntegerType()) {
+        Ops.Ty->isIntegerType() &&
+        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
       llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
       EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
     } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
-               Ops.Ty->isRealFloatingType()) {
+               Ops.Ty->isRealFloatingType() &&
+               Ops.mayHaveFloatDivisionByZero()) {
       llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
       llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
       EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
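
EmitDiv now skips the sanitizer block entirely when constant operands prove both hazards impossible: a zero divisor, and the INT_MIN / -1 overflow. A standalone sketch mirroring mayHaveIntegerDivisionByZero (the function name is borrowed for illustration):

    #include <climits>
    #include <cstdio>

    // Only a compile-time-constant divisor can prove the zero check
    // redundant; an unknown divisor must be assumed possibly zero.
    static bool mayHaveIntegerDivisionByZero(const int *rhsConst) {
      if (rhsConst)
        return *rhsConst == 0;
      return true;
    }

    int main() {
      int seven = 7;
      std::printf("constant 7 divisor may be zero: %d\n",
                  mayHaveIntegerDivisionByZero(&seven));
      std::printf("unknown divisor may be zero: %d\n",
                  mayHaveIntegerDivisionByZero(nullptr));
      // The other hazard the check covers: INT_MIN / -1 can't fit in int.
      std::printf("-INT_MIN = %lld > INT_MAX = %d\n",
                  -(long long)INT_MIN, INT_MAX);
    }
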
@@ -2324,12 +2529,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
 
 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
   // Rem in C can't be a floating point type: C99 6.5.5p2.
-  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
+       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
+      Ops.Ty->isIntegerType() &&
+      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
     CodeGenFunction::SanitizerScope SanScope(&CGF);
     llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
-
-    if (Ops.Ty->isIntegerType())
-      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
   }
 
   if (Ops.Ty->hasUnsignedIntegerRepresentation())
@@ -2369,6 +2575,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
   if (isSigned)
     OpID |= 1;
 
+  CodeGenFunction::SanitizerScope SanScope(&CGF);
   llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
 
   llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
@@ -2384,7 +2591,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
   // If the signed-integer-overflow sanitizer is enabled, emit a call to its
   // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
   if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
-    CodeGenFunction::SanitizerScope SanScope(&CGF);
     llvm::Value *NotOverflow = Builder.CreateNot(overflow);
     SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                   : SanitizerKind::UnsignedIntegerOverflow;
@@ -2460,13 +2666,14 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
     std::swap(pointerOperand, indexOperand);
   }
 
+  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+
   unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
   auto &DL = CGF.CGM.getDataLayout();
   auto PtrTy = cast<llvm::PointerType>(pointer->getType());
   if (width != DL.getTypeSizeInBits(PtrTy)) {
     // Zero-extend or sign-extend the pointer value according to
     // whether the index is signed or not.
-    bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
     index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
                                       "idx.ext");
   }
@@ -2510,7 +2717,9 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
       pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
     } else {
       index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
-      pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+      pointer =
+          CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
+                                     op.E->getExprLoc(), "add.ptr");
     }
     return pointer;
   }
@@ -2527,7 +2736,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
   if (CGF.getLangOpts().isSignedOverflowDefined())
     return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
 
-  return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
+                                    op.E->getExprLoc(), "add.ptr");
 }
 
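
Hoisting isSigned out of the width-check block matters because the index's signedness is now needed twice: once to pick sign- vs. zero-extension when widening the index to pointer width, and again by EmitCheckedInBoundsGEP to decide which wraparound direction is legal. A standalone sketch of the first half:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t idx = -1;
      // Sign-extended: the 64-bit offset is -1, as the C source intends.
      int64_t sext = static_cast<int64_t>(idx);
      // Zero-extended (treating the index as unsigned): the offset becomes
      // 4294967295 and the pointer lands 4 GiB away from the intended spot.
      int64_t zext = static_cast<int64_t>(static_cast<uint32_t>(idx));
      std::printf("sext: %lld, zext: %lld\n", (long long)sext,
                  (long long)zext);
    }
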
 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -2577,12 +2787,7 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
          "Only fadd/fsub can be the root of an fmuladd.");
 
   // Check whether this op is marked as fusable.
-  if (!op.FPContractable)
-    return nullptr;
-
-  // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
-  // either disabled, or handled entirely by the LLVM backend).
-  if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On)
+  if (!op.FPFeatures.allowFPContractWithinStatement())
     return nullptr;
 
   // We have a potentially fusable op. Look for a mul on one of the operands.
@@ -2605,7 +2810,7 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
 
 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
   if (op.LHS->getType()->isPointerTy() ||
       op.RHS->getType()->isPointerTy())
-    return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
 
   if (op.Ty->isSignedIntegerOrEnumerationType()) {
     switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
@@ -2616,12 +2821,15 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
       return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
       // Fall through.
     case LangOptions::SOB_Trapping:
+      if (CanElideOverflowCheck(CGF.getContext(), op))
+        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
       return EmitOverflowCheckedBinOp(op);
     }
   }
 
   if (op.Ty->isUnsignedIntegerType() &&
-      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+      !CanElideOverflowCheck(CGF.getContext(), op))
     return EmitOverflowCheckedBinOp(op);
 
   if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2629,7 +2837,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
     if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
       return FMulAdd;
 
-    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+    Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
+    return propagateFMFlags(V, op);
   }
 
   return Builder.CreateAdd(op.LHS, op.RHS, "add");
@@ -2647,19 +2856,23 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
       return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
       // Fall through.
     case LangOptions::SOB_Trapping:
+      if (CanElideOverflowCheck(CGF.getContext(), op))
+        return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
       return EmitOverflowCheckedBinOp(op);
     }
   }
 
   if (op.Ty->isUnsignedIntegerType() &&
-      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+      !CanElideOverflowCheck(CGF.getContext(), op))
     return EmitOverflowCheckedBinOp(op);
 
   if (op.LHS->getType()->isFPOrFPVectorTy()) {
     // Try to form an fmuladd.
     if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
       return FMulAdd;
-    return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+    Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
+    return propagateFMFlags(V, op);
   }
 
   return Builder.CreateSub(op.LHS, op.RHS, "sub");
@@ -2668,7 +2881,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
   // If the RHS is not a pointer, then we have normal pointer
   // arithmetic.
   if (!op.RHS->getType()->isPointerTy())
-    return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
+    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
 
   // Otherwise, this is a pointer subtraction.
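
tryEmitFMulAdd now consults the statement-local FPOptions instead of the global -ffp-contract mode, so a pragma can toggle contraction per scope. A standalone sketch of what within-statement contraction means at the source level (compile with clang++ -O2 -ffp-contract=on -S and look for a fused multiply-add on targets that have one):

    // Eligible: multiply and add in one statement, fusable shape.
    double contracted(double a, double b, double c) {
      return a * b + c;
    }

    // Not eligible: the multiply lives in a separate statement, so
    // within-statement contraction cannot fuse it with the add.
    double notContracted(double a, double b, double c) {
      double t = a * b;
      return t + c;
    }
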
@@ -2751,8 +2964,8 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
       isa<llvm::IntegerType>(Ops.LHS->getType())) {
     CodeGenFunction::SanitizerScope SanScope(&CGF);
     SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
-    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
-    llvm::Value *ValidExponent = Builder.CreateICmpULE(RHS, WidthMinusOne);
+    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
+    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
 
     if (SanitizeExponent) {
       Checks.push_back(
@@ -2767,12 +2980,14 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
       llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
       llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
       Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
+      llvm::Value *PromotedWidthMinusOne =
+          (RHS == Ops.RHS) ? WidthMinusOne
+                           : GetWidthMinusOneValue(Ops.LHS, RHS);
       CGF.EmitBlock(CheckShiftBase);
-      llvm::Value *BitsShiftedOff =
-        Builder.CreateLShr(Ops.LHS,
-                           Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
-                                             /*NUW*/true, /*NSW*/true),
-                           "shl.check");
+      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
+          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
+                                     /*NUW*/ true, /*NSW*/ true),
+          "shl.check");
       if (CGF.getLangOpts().CPlusPlus) {
         // In C99, we are not permitted to shift a 1 bit into the sign bit.
         // Under C++11's rules, shifting a 1 bit into the sign bit is
@@ -3038,10 +3253,12 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
     // because the result is altered by the store, i.e., [C99 6.5.16p1]
     // 'An assignment expression has the value of the left operand after
     // the assignment...'.
-    if (LHS.isBitField())
+    if (LHS.isBitField()) {
       CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
-    else
+    } else {
+      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
       CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+    }
   }
 
   // If the result is clearly ignored, return now.
@@ -3327,9 +3544,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
   // safe to evaluate the LHS and RHS unconditionally.
   if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
-    CGF.incrementProfileCounter(E);
-
     llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
+
+    CGF.incrementProfileCounter(E, StepV);
+
     llvm::Value *LHS = Visit(lhsExpr);
     llvm::Value *RHS = Visit(rhsExpr);
     if (!LHS) {
@@ -3491,8 +3710,12 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
   // vector to get a vec4, then a bitcast if the target type is different.
   if (NumElementsSrc == 3 && NumElementsDst != 3) {
     Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
-    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
-                                       DstTy);
+
+    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
+      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+                                         DstTy);
+    }
+
     Src->setName("astype");
     return Src;
   }
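
The EmitShl fix computes the exponent check against the untruncated Ops.RHS, and only re-derives a width constant matching the truncated RHS when the two differ. The invariant being enforced is the usual one: shifting an N-bit value by N or more is undefined. A standalone sketch (assumes 32-bit int; -fsanitize=shift enables the check):

    // Compile with: clang++ -fsanitize=shift shl_demo.cpp
    #include <cstdio>

    int main(int argc, char **) {
      int width = 8 * sizeof(int);   // 32 on typical targets
      int amount = width + argc;     // >= 33: out of range
      std::printf("shifting by %d\n", amount);
      return 1 << amount;            // flagged at runtime by the sanitizer
    }
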
@@ -3501,9 +3724,12 @@
   // to vec4 if the original type is not vec4, then a shuffle vector to
   // get a vec3.
   if (NumElementsSrc != 3 && NumElementsDst == 3) {
-    auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
-    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
-                                       Vec4Ty);
+    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
+      auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
+      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+                                         Vec4Ty);
+    }
+
     Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
     Src->setName("astype");
     return Src;
@@ -3626,3 +3852,136 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue(
 
   llvm_unreachable("Unhandled compound assignment operator");
 }
+
+Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
+                                               ArrayRef<Value *> IdxList,
+                                               bool SignedIndices,
+                                               bool IsSubtraction,
+                                               SourceLocation Loc,
+                                               const Twine &Name) {
+  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
+
+  // If the pointer overflow sanitizer isn't enabled, do nothing.
+  if (!SanOpts.has(SanitizerKind::PointerOverflow))
+    return GEPVal;
+
+  // If the GEP has already been reduced to a constant, leave it be.
+  if (isa<llvm::Constant>(GEPVal))
+    return GEPVal;
+
+  // Only check for overflows in the default address space.
+  if (GEPVal->getType()->getPointerAddressSpace())
+    return GEPVal;
+
+  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
+  assert(GEP->isInBounds() && "Expected inbounds GEP");
+
+  SanitizerScope SanScope(this);
+  auto &VMContext = getLLVMContext();
+  const auto &DL = CGM.getDataLayout();
+  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
+
+  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
+  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
+  auto *SAddIntrinsic =
+      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
+  auto *SMulIntrinsic =
+      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
+
+  // The total (signed) byte offset for the GEP.
+  llvm::Value *TotalOffset = nullptr;
+  // The offset overflow flag - true if the total offset overflows.
+  llvm::Value *OffsetOverflows = Builder.getFalse();
+
+  /// Return the result of the given binary operation.
+  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
+                  llvm::Value *RHS) -> llvm::Value * {
+    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
+
+    // If the operands are constants, return a constant result.
+    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
+      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
+        llvm::APInt N;
+        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
+                                                  /*Signed=*/true, N);
+        if (HasOverflow)
+          OffsetOverflows = Builder.getTrue();
+        return llvm::ConstantInt::get(VMContext, N);
+      }
+    }
+
+    // Otherwise, compute the result with checked arithmetic.
+    auto *ResultAndOverflow = Builder.CreateCall(
+        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
+    OffsetOverflows = Builder.CreateOr(
+        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
+    return Builder.CreateExtractValue(ResultAndOverflow, 0);
+  };
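
The eval lambda folds constant operands itself, recording guaranteed overflow in OffsetOverflows, and otherwise calls the llvm.sadd/smul.with.overflow intrinsics and extracts the {result, overflow-bit} pair. A standalone sketch of that contract with compiler builtins (names are illustrative):

    #include <cstdint>
    #include <cstdio>

    static bool OffsetOverflows = false;

    // Checked signed add/mul: keep the wrapped value, OR overflow into the
    // running flag, mirroring the lambda's use of the overflow intrinsics.
    static int64_t evalChecked(char op, int64_t lhs, int64_t rhs) {
      int64_t result;
      bool ov = (op == '+') ? __builtin_add_overflow(lhs, rhs, &result)
                            : __builtin_mul_overflow(lhs, rhs, &result);
      OffsetOverflows |= ov;
      return result;
    }

    int main() {
      // offset = index * elementSize, then added to the running total.
      int64_t off = evalChecked('*', INT64_MAX / 2, 4); // overflows
      off = evalChecked('+', off, 16);
      std::printf("offset=%lld overflowed=%d\n", (long long)off,
                  OffsetOverflows);
    }
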
+
+  // Determine the total byte offset by looking at each GEP operand.
+  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
+       GTI != GTE; ++GTI) {
+    llvm::Value *LocalOffset;
+    auto *Index = GTI.getOperand();
+    // Compute the local offset contributed by this indexing step:
+    if (auto *STy = GTI.getStructTypeOrNull()) {
+      // For struct indexing, the local offset is the byte position of the
+      // specified field.
+      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
+      LocalOffset = llvm::ConstantInt::get(
+          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
+    } else {
+      // Otherwise this is array-like indexing. The local offset is the index
+      // multiplied by the element size.
+      auto *ElementSize = llvm::ConstantInt::get(
+          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
+      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
+      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
+    }
+
+    // If this is the first offset, set it as the total offset. Otherwise, add
+    // the local offset into the running total.
+    if (!TotalOffset || TotalOffset == Zero)
+      TotalOffset = LocalOffset;
+    else
+      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
+  }
+
+  // Common case: if the total offset is zero, don't emit a check.
+  if (TotalOffset == Zero)
+    return GEPVal;
+
+  // Now that we've computed the total offset, add it to the base pointer (with
+  // wrapping semantics).
+  auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
+  auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
+
+  // The GEP is valid if:
+  // 1) The total offset doesn't overflow, and
+  // 2) The sign of the difference between the computed address and the base
+  // pointer matches the sign of the total offset.
+  llvm::Value *ValidGEP;
+  auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows);
+  if (SignedIndices) {
+    auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
+    auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
+    llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
+    ValidGEP = Builder.CreateAnd(
+        Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid),
+        NoOffsetOverflow);
+  } else if (!SignedIndices && !IsSubtraction) {
+    auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
+    ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow);
+  } else {
+    auto *NegOrZeroValid = Builder.CreateICmpULE(ComputedGEP, IntPtr);
+    ValidGEP = Builder.CreateAnd(NegOrZeroValid, NoOffsetOverflow);
+  }
+
+  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
+  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
+  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
+  EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
+            SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
+
+  return GEPVal;
+}
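
The final predicate, restated on plain integers: the wrapped address computation is accepted only if no partial product or sum overflowed, and the computed address moved in the direction the offset's sign promises. A standalone sketch (validGEP is an illustrative name, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // 'overflowed' mirrors OffsetOverflows from the checked accumulation.
    static bool validGEP(uintptr_t base, intptr_t offset, bool overflowed,
                         bool signedIndices, bool isSubtraction) {
      uintptr_t computed = base + static_cast<uintptr_t>(offset); // wraps
      if (overflowed)
        return false;
      if (signedIndices) // Either direction, matching the offset's sign.
        return offset >= 0 ? computed >= base : computed < base;
      if (!isSubtraction) // Unsigned addition may only move forward.
        return computed >= base;
      return computed <= base; // Unsigned subtraction only moves backward.
    }

    int main() {
      std::printf("%d\n", validGEP(0x1000, 16, false, true, false));  // 1
      std::printf("%d\n", validGEP(16, -32, false, true, false));     // 0
      std::printf("%d\n",
                  validGEP(UINTPTR_MAX, 1, false, false, false));     // 0
    }
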