Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h  440
1 file changed, 324 insertions(+), 116 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index 5861340..753dd92 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -115,9 +115,12 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(MissingReturn, missing_return, 0) \
SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
+ SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
+ SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
- SANITIZER_CHECK(NonnullReturn, nonnull_return, 0) \
+ SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
+ SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
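For context, a minimal user-level sketch (not part of this patch) of the kind of code the new nullability_return handler diagnoses when clang's -fsanitize=nullability-return option is enabled; the function below is hypothetical:
// Illustrative only: returning null from a function whose return type is
// annotated _Nonnull invokes the nullability_return runtime handler.
int *_Nonnull firstElement(int *buf, unsigned len) {
  if (len == 0)
    return nullptr;  // caught at runtime by -fsanitize=nullability-return
  return buf;
}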
@@ -173,6 +176,25 @@ public:
// because of jumps.
VarBypassDetector Bypasses;
+ // CodeGen lambda for loops and support for ordered clause
+ typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
+ JumpDest)>
+ CodeGenLoopTy;
+ typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
+ const unsigned, const bool)>
+ CodeGenOrderedTy;
+
+ // Codegen lambda for loop bounds in worksharing loop constructs
+ typedef llvm::function_ref<std::pair<LValue, LValue>(
+ CodeGenFunction &, const OMPExecutableDirective &S)>
+ CodeGenLoopBoundsTy;
+
+ // Codegen lambda for loop bounds in dispatch-based loop implementation
+ typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
+ CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
+ Address UB)>
+ CodeGenDispatchBoundsTy;
+
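As a rough sketch (not from this patch, and assuming clang's CodeGen namespace context), a callable convertible to CodeGenLoopBoundsTy could look like the lambda below; the way the bounds are fetched from the directive is an assumption:
// Sketch: return the (lower bound, upper bound) lvalues of a worksharing loop.
auto LoopBounds = [](CodeGenFunction &CGF, const OMPExecutableDirective &S)
    -> std::pair<LValue, LValue> {
  const auto &LS = cast<OMPLoopDirective>(S);
  LValue LB = CGF.EmitLValue(LS.getLowerBoundVariable());
  LValue UB = CGF.EmitLValue(LS.getUpperBoundVariable());
  return {LB, UB};
};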
/// \brief CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
@@ -212,6 +234,13 @@ public:
/// value. This is invalid iff the function has no return value.
Address ReturnValue;
+ /// Return true if a label was seen in the current scope.
+ bool hasLabelBeenSeenInCurrentScope() const {
+ if (CurLexicalScope)
+ return CurLexicalScope->hasLabels();
+ return !LabelMap.empty();
+ }
+
/// AllocaInsertPoint - This is an instruction in the entry block before which
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
@@ -298,6 +327,31 @@ public:
~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
};
+ /// An abstract representation of regular/ObjC call/message targets.
+ class AbstractCallee {
+ /// The function declaration of the callee.
+ const Decl *CalleeDecl;
+
+ public:
+ AbstractCallee() : CalleeDecl(nullptr) {}
+ AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
+ AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
+ bool hasFunctionDecl() const {
+ return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
+ }
+ const Decl *getDecl() const { return CalleeDecl; }
+ unsigned getNumParams() const {
+ if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
+ return FD->getNumParams();
+ return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
+ }
+ const ParmVarDecl *getParamDecl(unsigned I) const {
+ if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
+ return FD->getParamDecl(I);
+ return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
+ }
+ };
+
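A brief hypothetical fragment (not part of this patch) showing how AbstractCallee lets the same parameter walk serve both C/C++ functions and Objective-C methods:
// Sketch: AC may wrap a FunctionDecl or an ObjCMethodDecl; the caller no
// longer needs to know which.
void scanParamsForNonnull(CodeGenFunction::AbstractCallee AC) {
  if (!AC.getDecl())
    return;  // e.g. an indirect call with no visible declaration
  for (unsigned I = 0, N = AC.getNumParams(); I != N; ++I) {
    const ParmVarDecl *PVD = AC.getParamDecl(I);
    if (PVD->hasAttr<NonNullAttr>()) {
      // ... emit a nonnull-argument check for parameter I ...
    }
  }
}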
/// \brief Sanitizers enabled for this function.
SanitizerSet SanOpts;
@@ -548,14 +602,10 @@ public:
CGF.DidCallStackSave = false;
}
- /// \brief Exit this cleanup scope, emitting any accumulated
- /// cleanups.
+ /// \brief Exit this cleanup scope, emitting any accumulated cleanups.
~RunCleanupsScope() {
- if (PerformCleanup) {
- CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.PopCleanupBlocks(CleanupStackDepth,
- LifetimeExtendedCleanupStackSize);
- }
+ if (PerformCleanup)
+ ForceCleanup();
}
/// \brief Determine whether this scope requires any cleanups.
@@ -565,11 +615,15 @@ public:
/// \brief Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
- void ForceCleanup() {
+ /// \param ValuesToReload - A list of values that need to be available at
+ /// the insertion point after cleanup emission. If cleanup emission created
+ /// a shared cleanup block, these value pointers will be rewritten.
+ /// Otherwise, they will not be modified.
+ void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.PopCleanupBlocks(CleanupStackDepth,
- LifetimeExtendedCleanupStackSize);
+ CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
+ ValuesToReload);
PerformCleanup = false;
}
};
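An illustrative use of the new parameter (assumed, not taken from this patch): a value produced inside the scope stays usable after cleanups are emitted by passing its address through ValuesToReload:
// Sketch: 'Result' may be rewritten to a reload if a shared cleanup block is
// created while popping the scope's cleanups.
llvm::Value *Result = nullptr;
{
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  Result = /* emit an expression that may register cleanups */ nullptr;
  Scope.ForceCleanup({&Result});
}
// Result is still valid at the current insertion point.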
@@ -620,6 +674,10 @@ public:
rescopeLabels();
}
+ bool hasLabels() const {
+ return !Labels.empty();
+ }
+
void rescopeLabels();
};
@@ -727,13 +785,17 @@ public:
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
- void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+ void
+ PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
+ std::initializer_list<llvm::Value **> ValuesToReload = {});
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added, then adds all lifetime-extended cleanups from
/// the given position to the stack.
- void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
- size_t OldLifetimeExtendedStackSize);
+ void
+ PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
+ size_t OldLifetimeExtendedStackSize,
+ std::initializer_list<llvm::Value **> ValuesToReload = {});
void ResolveBranchFixups(llvm::BasicBlock *Target);
@@ -1116,10 +1178,11 @@ private:
uint64_t LoopCount);
public:
- /// Increment the profiler's counter for the given statement.
- void incrementProfileCounter(const Stmt *S) {
+ /// Increment the profiler's counter for the given statement by \p StepV.
+ /// If \p StepV is null, the default increment is 1.
+ void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
if (CGM.getCodeGenOpts().hasProfileClangInstr())
- PGO.emitCounterIncrement(Builder, S);
+ PGO.emitCounterIncrement(Builder, S, StepV);
PGO.setCurrentStmt(S);
}
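A small hypothetical use of the new step parameter (not in this patch); 'VectorWidth' is an assumed local:
// Sketch: account for VectorWidth executions of S with a single increment.
llvm::Value *Step = llvm::ConstantInt::get(CGF.Int64Ty, VectorWidth);
CGF.incrementProfileCounter(S, Step);  // passing nullptr keeps the default step of 1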
@@ -1334,6 +1397,27 @@ private:
/// information about the layout of the variable.
llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;
+ /// Used by -fsanitize=nullability-return to determine whether the return
+ /// value can be checked.
+ llvm::Value *RetValNullabilityPrecondition = nullptr;
+
+ /// Check if -fsanitize=nullability-return instrumentation is required for
+ /// this function.
+ bool requiresReturnValueNullabilityCheck() const {
+ return RetValNullabilityPrecondition;
+ }
+
+ /// Used to store precise source locations for return statements by the
+ /// runtime return value checks.
+ Address ReturnLocation = Address::invalid();
+
+ /// Check if the return value of this function requires sanitization.
+ bool requiresReturnValueCheck() const {
+ return requiresReturnValueNullabilityCheck() ||
+ (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
+ CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
+ }
+
llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
@@ -1341,16 +1425,8 @@ private:
/// True if we need emit the life-time markers.
const bool ShouldEmitLifetimeMarkers;
- /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
- /// In the kernel metadata node, reference the kernel function and metadata
- /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
- /// - A node for the vec_type_hint(<type>) qualifier contains string
- /// "vec_type_hint", an undefined value of the <type> data type,
- /// and a Boolean that is true if the <type> is integer and signed.
- /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
- /// "work_group_size_hint", and three 32-bit integers X, Y and Z.
- /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
- /// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
+ /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
+ /// the function metadata.
void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn);
@@ -1403,6 +1479,9 @@ public:
const TargetInfo &getTarget() const { return Target; }
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
//===--------------------------------------------------------------------===//
// Cleanups
@@ -1553,6 +1632,8 @@ public:
SourceLocation Loc = SourceLocation(),
SourceLocation StartLoc = SourceLocation());
+ static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);
+
void EmitConstructorBody(FunctionArgList &Args);
void EmitDestructorBody(FunctionArgList &Args);
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
@@ -1671,11 +1752,6 @@ public:
llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
uint64_t VTableByteOffset);
- /// CanDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
- /// expr can be devirtualized.
- bool CanDevirtualizeMemberFunctionCall(const Expr *Base,
- const CXXMethodDecl *MD);
-
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
/// should call destructors on members and base classes in reverse
@@ -1710,6 +1786,9 @@ public:
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
SourceLocation EndLoc);
+ /// Emit a test that checks if the return value \p RV is nonnull.
+ void EmitReturnValueCheck(llvm::Value *RV);
+
/// EmitStartEHSpec - Emit the start of the exception spec.
void EmitStartEHSpec(const Decl *D);
@@ -1817,40 +1896,65 @@ public:
//===--------------------------------------------------------------------===//
LValue MakeAddrLValue(Address Addr, QualType T,
- AlignmentSource AlignSource = AlignmentSource::Type) {
- return LValue::MakeAddr(Addr, T, getContext(), AlignSource,
+ LValueBaseInfo BaseInfo =
+ LValueBaseInfo(AlignmentSource::Type)) {
+ return LValue::MakeAddr(Addr, T, getContext(), BaseInfo,
CGM.getTBAAInfo(T));
}
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
- AlignmentSource AlignSource = AlignmentSource::Type) {
+ LValueBaseInfo BaseInfo =
+ LValueBaseInfo(AlignmentSource::Type)) {
return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- AlignSource, CGM.getTBAAInfo(T));
+ BaseInfo, CGM.getTBAAInfo(T));
}
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
CharUnits getNaturalTypeAlignment(QualType T,
- AlignmentSource *Source = nullptr,
+ LValueBaseInfo *BaseInfo = nullptr,
bool forPointeeType = false);
CharUnits getNaturalPointeeTypeAlignment(QualType T,
- AlignmentSource *Source = nullptr);
+ LValueBaseInfo *BaseInfo = nullptr);
Address EmitLoadOfReference(Address Ref, const ReferenceType *RefTy,
- AlignmentSource *Source = nullptr);
+ LValueBaseInfo *BaseInfo = nullptr);
LValue EmitLoadOfReferenceLValue(Address Ref, const ReferenceType *RefTy);
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
- AlignmentSource *Source = nullptr);
+ LValueBaseInfo *BaseInfo = nullptr);
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
- /// CreateTempAlloca - This creates a alloca and inserts it into the entry
- /// block. The caller is responsible for setting an appropriate alignment on
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block if \p ArraySize is nullptr, otherwise inserts it at the current
+ /// insertion point of the builder. The caller is responsible for setting an
+ /// appropriate alignment on
/// the alloca.
- llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
- const Twine &Name = "tmp");
+ ///
+ /// \p ArraySize is the number of array elements to be allocated if it
+ /// is not nullptr.
+ ///
+ /// LangAS::Default is the address space of pointers to local variables and
+ /// temporaries, as exposed in the source language. In certain
+ /// configurations, this is not the same as the alloca address space, and a
+ /// cast is needed to lift the pointer from the alloca AS into
+ /// LangAS::Default. This can happen when the target uses a restricted
+ /// address space for the stack but the source language requires
+ /// LangAS::Default to be a generic address space. The latter condition is
+ /// common for most programming languages; OpenCL is an exception in that
+ /// LangAS::Default is the private address space, which naturally maps
+ /// to the stack.
+ ///
+ /// Because the address of a temporary is often exposed to the program in
+ /// various ways, this function will perform the cast by default. The cast
+ /// may be avoided by passing false as \p CastToDefaultAddrSpace; this is
+ /// more efficient if the caller knows that the address will not be exposed.
+ llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr);
Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp");
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr,
+ bool CastToDefaultAddrSpace = true);
/// CreateDefaultAlignedTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
@@ -1885,9 +1989,12 @@ public:
Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
- /// appropriate alignment.
- Address CreateMemTemp(QualType T, const Twine &Name = "tmp");
- Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp");
+ /// appropriate alignment. Cast it to the default address space if
+ /// \p CastToDefaultAddrSpace is true.
+ Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
+ bool CastToDefaultAddrSpace = true);
+ Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
+ bool CastToDefaultAddrSpace = true);
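To make the address-space behaviour concrete, a hedged sketch of the two modes (assuming a target whose alloca address space differs from LangAS::Default):
// Sketch: a temporary whose address may escape keeps the default cast into
// LangAS::Default, while an internal-only slot can skip the addrspacecast.
Address Exposed = CGF.CreateMemTemp(Ty, "ref.tmp");
Address Scratch = CGF.CreateMemTemp(Ty, "scratch",
                                    /*CastToDefaultAddrSpace=*/false);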
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
@@ -1928,7 +2035,7 @@ public:
/// pointer to a char.
Address EmitMSVAListRef(const Expr *E);
- /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
+ /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue EmitAnyExprToTemp(const Expr *E);
@@ -2019,6 +2126,9 @@ public:
llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
llvm::BasicBlock *GetIndirectGotoBlock();
+ /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
+ static bool IsWrappedCXXThis(const Expr *E);
+
/// EmitNullInitialization - Generate code to set a value of the given type to
/// null, If the type contains data member pointers, they will be initialized
/// to -1 in accordance with the Itanium C++ ABI.
@@ -2230,7 +2340,9 @@ public:
TCK_Upcast,
/// Checking the operand of a cast to a virtual base object. Must be an
/// object within its lifetime.
- TCK_UpcastToVirtualBase
+ TCK_UpcastToVirtualBase,
+ /// Checking the value assigned to a _Nonnull pointer. Must not be null.
+ TCK_NonnullAssign
};
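For context, a minimal user-level example (not part of this patch) of what the new TCK_NonnullAssign kind checks under clang's -fsanitize=nullability-assign:
// Illustrative only: if 'maybeNull' is null at runtime, the store into the
// _Nonnull global is flagged by the nullability-assign check.
int g;
int *_Nonnull gp = &g;
void update(int *maybeNull) { gp = maybeNull; }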
/// \brief Whether any type-checking sanitizers are enabled. If \c false,
@@ -2241,7 +2353,7 @@ public:
/// appropriate size and alignment for an object of type \p Type.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
QualType Type, CharUnits Alignment = CharUnits::Zero(),
- bool SkipNullCheck = false);
+ SanitizerSet SkippedChecks = SanitizerSet());
/// \brief Emit a check that \p Base points into an array object, which
/// we can access at index \p Index. \p Accessed should be \c false if we
@@ -2401,6 +2513,12 @@ public:
PeepholeProtection protectFromPeepholes(RValue rvalue);
void unprotectFromPeepholes(PeepholeProtection protection);
+ void EmitAlignmentAssumption(llvm::Value *PtrValue, llvm::Value *Alignment,
+ llvm::Value *OffsetValue = nullptr) {
+ Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
+ OffsetValue);
+ }
+
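For reference, a standalone sketch (not from this patch) of one source construct that reaches this helper, clang's __builtin_assume_aligned:
// Sketch: the builtin's alignment argument is what EmitAlignmentAssumption
// forwards to CreateAlignmentAssumption.
float sum16(const float *p, int n) {
  const float *ap =
      static_cast<const float *>(__builtin_assume_aligned(p, 16));
  float s = 0;
  for (int i = 0; i < n; ++i)
    s += ap[i];
  return s;
}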
//===--------------------------------------------------------------------===//
// Statement Emission
//===--------------------------------------------------------------------===//
@@ -2463,6 +2581,15 @@ public:
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
void EmitCoroutineBody(const CoroutineBodyStmt &S);
+ void EmitCoreturnStmt(const CoreturnStmt &S);
+ RValue EmitCoawaitExpr(const CoawaitExpr &E,
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
+ LValue EmitCoawaitLValue(const CoawaitExpr *E);
+ RValue EmitCoyieldExpr(const CoyieldExpr &E,
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
+ LValue EmitCoyieldLValue(const CoyieldExpr *E);
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
@@ -2627,7 +2754,9 @@ public:
/// the end of the directive.
///
/// \param D Directive that has at least one 'reduction' directives.
- void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D);
+ /// \param ReductionKind The kind of reduction to perform.
+ void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
+ const OpenMPDirectiveKind ReductionKind);
/// \brief Emit initial code for linear variables. Creates private copies
/// and initializes them with the values according to OpenMP standard.
///
@@ -2678,7 +2807,6 @@ public:
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
- void EmitOMPDistributeLoop(const OMPDistributeDirective &S);
void EmitOMPDistributeParallelForDirective(
const OMPDistributeParallelForDirective &S);
void EmitOMPDistributeParallelForSimdDirective(
@@ -2704,13 +2832,16 @@ public:
void EmitOMPTargetTeamsDistributeSimdDirective(
const OMPTargetTeamsDistributeSimdDirective &S);
- /// Emit outlined function for the target directive.
- static std::pair<llvm::Function * /*OutlinedFn*/,
- llvm::Constant * /*OutlinedFnID*/>
- EmitOMPTargetDirectiveOutlinedFunction(CodeGenModule &CGM,
- const OMPTargetDirective &S,
- StringRef ParentName,
- bool IsOffloadEntry);
+ /// Emit device code for the target directive.
+ static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
+ StringRef ParentName,
+ const OMPTargetDirective &S);
+ static void
+ EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetParallelDirective &S);
+ static void
+ EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDirective &S);
/// \brief Emit inner loop of the worksharing/simd construct.
///
/// \param S Directive, for which the inner loop must be emitted.
@@ -2732,32 +2863,78 @@ public:
void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
OMPPrivateScope &LoopScope);
+ /// Helper for the OpenMP loop directives.
+ void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
+
+ /// \brief Emit code for the worksharing loop-based directive.
+ /// \return true if this construct has any lastprivate clause, false
+ /// otherwise.
+ bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
+ const CodeGenLoopBoundsTy &CodeGenLoopBounds,
+ const CodeGenDispatchBoundsTy &CGDispatchBounds);
+
private:
/// Helpers for blocks
llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
/// Helpers for the OpenMP loop directives.
- void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
void EmitOMPSimdFinal(
const OMPLoopDirective &D,
const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen);
- /// \brief Emit code for the worksharing loop-based directive.
- /// \return true, if this construct has any lastprivate clause, false -
- /// otherwise.
- bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
- void EmitOMPOuterLoop(bool IsMonotonic, bool DynamicOrOrdered,
- const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
- Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk);
+
+ void EmitOMPDistributeLoop(const OMPLoopDirective &S,
+ const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
+
+ /// struct with the values to be passed to the OpenMP loop-related functions
+ struct OMPLoopArguments {
+ /// loop lower bound
+ Address LB = Address::invalid();
+ /// loop upper bound
+ Address UB = Address::invalid();
+ /// loop stride
+ Address ST = Address::invalid();
+ /// isLastIteration argument for runtime functions
+ Address IL = Address::invalid();
+ /// Chunk value generated by sema
+ llvm::Value *Chunk = nullptr;
+ /// EnsureUpperBound
+ Expr *EUB = nullptr;
+ /// IncrementExpression
+ Expr *IncExpr = nullptr;
+ /// Loop initialization
+ Expr *Init = nullptr;
+ /// Loop exit condition
+ Expr *Cond = nullptr;
+ /// Update of LB after a whole chunk has been executed
+ Expr *NextLB = nullptr;
+ /// Update of UB after a whole chunk has been executed
+ Expr *NextUB = nullptr;
+ OMPLoopArguments() = default;
+ OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
+ llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
+ Expr *IncExpr = nullptr, Expr *Init = nullptr,
+ Expr *Cond = nullptr, Expr *NextLB = nullptr,
+ Expr *NextUB = nullptr)
+ : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
+ IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
+ NextUB(NextUB) {}
+ };
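A hypothetical call site (not from this patch) showing how the aggregate replaces the previous long parameter lists; the surrounding locals (S, LB, UB, ST, IL, Chunk, LoopScope, DynamicOrOrdered, IsMonotonic, CodeGenLoop, CodeGenOrdered) are assumed:
// Sketch: bundle the worksharing-loop state once, then hand it to the outer
// loop emitter together with the loop and 'ordered' codegen callbacks.
OMPLoopArguments LoopArgs(LB, UB, ST, IL, Chunk, S.getEnsureUpperBound(),
                          S.getInc());
EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, LoopArgs,
                 CodeGenLoop, CodeGenOrdered);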
+ void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
+ const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
+ const OMPLoopArguments &LoopArgs,
+ const CodeGenLoopTy &CodeGenLoop,
+ const CodeGenOrderedTy &CodeGenOrdered);
void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
bool IsMonotonic, const OMPLoopDirective &S,
- OMPPrivateScope &LoopScope, bool Ordered, Address LB,
- Address UB, Address ST, Address IL,
- llvm::Value *Chunk);
- void EmitOMPDistributeOuterLoop(
- OpenMPDistScheduleClauseKind ScheduleKind,
- const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
- Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk);
+ OMPPrivateScope &LoopScope, bool Ordered,
+ const OMPLoopArguments &LoopArgs,
+ const CodeGenDispatchBoundsTy &CGDispatchBounds);
+ void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
+ const OMPLoopDirective &S,
+ OMPPrivateScope &LoopScope,
+ const OMPLoopArguments &LoopArgs,
+ const CodeGenLoopTy &CodeGenLoopContent);
/// \brief Emit code for sections directive.
void EmitSections(const OMPExecutableDirective &S);
@@ -2843,13 +3020,20 @@ public:
/// representation to its value representation.
llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
+ /// Check if the scalar \p Value is within the valid range for the given
+ /// type \p Ty.
+ ///
+ /// Returns true if a check is needed (even if the range is unknown).
+ bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
+ SourceLocation Loc);
+
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
SourceLocation Loc,
- AlignmentSource AlignSource =
- AlignmentSource::Type,
+ LValueBaseInfo BaseInfo =
+ LValueBaseInfo(AlignmentSource::Type),
llvm::MDNode *TBAAInfo = nullptr,
QualType TBAABaseTy = QualType(),
uint64_t TBAAOffset = 0,
@@ -2866,7 +3050,8 @@ public:
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
bool Volatile, QualType Ty,
- AlignmentSource AlignSource = AlignmentSource::Type,
+ LValueBaseInfo BaseInfo =
+ LValueBaseInfo(AlignmentSource::Type),
llvm::MDNode *TBAAInfo = nullptr, bool isInit = false,
QualType TBAABaseTy = QualType(),
uint64_t TBAAOffset = 0, bool isNontemporal = false);
@@ -2883,7 +3068,7 @@ public:
/// rvalue, returning the rvalue.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
RValue EmitLoadOfExtVectorElementLValue(LValue V);
- RValue EmitLoadOfBitfieldLValue(LValue LV);
+ RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
RValue EmitLoadOfGlobalRegLValue(LValue LV);
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
@@ -2939,7 +3124,7 @@ public:
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
Address EmitArrayToPointerDecay(const Expr *Array,
- AlignmentSource *AlignSource = nullptr);
+ LValueBaseInfo *BaseInfo = nullptr);
class ConstantEmission {
llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
@@ -3080,7 +3265,7 @@ public:
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
llvm::Value *memberPtr,
const MemberPointerType *memberPtrType,
- AlignmentSource *AlignSource = nullptr);
+ LValueBaseInfo *BaseInfo = nullptr);
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
@@ -3092,8 +3277,8 @@ public:
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
- RValue EmitCUDADevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue);
+ RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue);
RValue EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
@@ -3149,6 +3334,8 @@ private:
public:
llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
+ llvm::Value *EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args);
+
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
@@ -3215,6 +3402,7 @@ public:
static Destroyer destroyARCStrongImprecise;
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
+ static Destroyer emitARCIntrinsicUse;
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
llvm::Value *EmitObjCAutoreleasePoolPush();
@@ -3316,9 +3504,10 @@ public:
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
- void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::WeakVH,
- llvm::Constant*> > &DtorsAndObjects);
+ void GenerateCXXGlobalDtorsFunc(
+ llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
+ &DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
@@ -3396,6 +3585,26 @@ public:
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
+ /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
+ /// nonnull, if \p LHS is marked _Nonnull.
+ void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
+
+ /// An enumeration which makes it easier to specify whether or not an
+ /// operation is a subtraction.
+ enum { NotSubtraction = false, IsSubtraction = true };
+
+ /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
+ /// detect undefined behavior when the pointer overflow sanitizer is enabled.
+ /// \p SignedIndices indicates whether any of the GEP indices are signed.
+ /// \p IsSubtraction indicates whether the expression used to form the GEP
+ /// is a subtraction.
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ ArrayRef<llvm::Value *> IdxList,
+ bool SignedIndices,
+ bool IsSubtraction,
+ SourceLocation Loc,
+ const Twine &Name = "");
+
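As user-facing context (not part of this patch), the pointer arithmetic that EmitCheckedInBoundsGEP instruments under clang's -fsanitize=pointer-overflow:
// Sketch: if 'count' is large enough that the addition wraps the address
// space, the emitted check reports pointer-overflow instead of silently
// producing a wrapped pointer.
const char *advance(const char *p, unsigned long count) {
  return p + count;  // lowered through EmitCheckedInBoundsGEP
}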
/// \brief Emit a description of a type in a format suitable for passing to
/// a runtime sanitizer handler.
llvm::Constant *EmitCheckTypeDescriptor(QualType T);
@@ -3429,13 +3638,16 @@ public:
/// "trap-func-name" if specified.
llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
+ /// \brief Emit a stub for the cross-DSO CFI check function.
+ void EmitCfiCheckStub();
+
/// \brief Emit a cross-DSO CFI failure handling function.
void EmitCfiCheckFail();
/// \brief Create a check for a function parameter that may potentially be
/// declared as non-null.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
- const FunctionDecl *FD, unsigned ParmNum);
+ AbstractCallee AC, unsigned ParmNum);
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
@@ -3490,14 +3702,18 @@ private:
/// \brief Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
/// pass_object_size aware.
+ ///
+ /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType);
+ llvm::IntegerType *ResType,
+ llvm::Value *EmittedE);
/// \brief Emits the size of E, as required by __builtin_object_size. This
/// function is aware of pass_object_size parameters, and will act accordingly
/// if E is a parameter with the pass_object_size attribute.
llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType);
+ llvm::IntegerType *ResType,
+ llvm::Value *EmittedE);
public:
#ifndef NDEBUG
@@ -3533,7 +3749,7 @@ public:
template <typename T>
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
- const FunctionDecl *CalleeDecl = nullptr,
+ AbstractCallee AC = AbstractCallee(),
unsigned ParamsToSkip = 0,
EvaluationOrder Order = EvaluationOrder::Default) {
SmallVector<QualType, 16> ArgTypes;
@@ -3575,48 +3791,40 @@ public:
for (auto *A : llvm::make_range(Arg, ArgRange.end()))
ArgTypes.push_back(CallArgTypeInfo ? getVarArgType(A) : A->getType());
- EmitCallArgs(Args, ArgTypes, ArgRange, CalleeDecl, ParamsToSkip, Order);
+ EmitCallArgs(Args, ArgTypes, ArgRange, AC, ParamsToSkip, Order);
}
void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
- const FunctionDecl *CalleeDecl = nullptr,
+ AbstractCallee AC = AbstractCallee(),
unsigned ParamsToSkip = 0,
EvaluationOrder Order = EvaluationOrder::Default);
- /// EmitPointerWithAlignment - Given an expression with a pointer
- /// type, emit the value and compute our best estimate of the
- /// alignment of the pointee.
+ /// EmitPointerWithAlignment - Given an expression with a pointer type,
+ /// emit the value and compute our best estimate of the alignment of the
+ /// pointee.
///
- /// \param Source - If non-null, this will be initialized with
- /// information about the source of the alignment. Note that this
- /// function will conservatively fall back on the type when it
- /// doesn't recognize the expression, which means that sometimes the
- /// alignment will only be a worst-case estimate.
+ /// \param BaseInfo - If non-null, this will be initialized with
+ /// information about the source of the alignment and the may-alias
+ /// attribute. Note that this function will conservatively fall back on
+ /// the type when it doesn't recognize the expression and may-alias will
+ /// be set to false.
///
- /// One reasonable way to use this information is when there's a
- /// language guarantee that the pointer must be aligned to some
- /// stricter value, and we're simply trying to ensure that
- /// sufficiently obvious uses of under-aligned objects don't get
- /// miscompiled; for example, a placement new into the address of
- /// a local variable. In such a case, it's quite reasonable to
- /// just ignore the returned alignment when it isn't from an
- /// explicit source.
+ /// One reasonable way to use this information is when there's a language
+ /// guarantee that the pointer must be aligned to some stricter value, and
+ /// we're simply trying to ensure that sufficiently obvious uses of under-
+ /// aligned objects don't get miscompiled; for example, a placement new
+ /// into the address of a local variable. In such a case, it's quite
+ /// reasonable to just ignore the returned alignment when it isn't from an
+ /// explicit source.
Address EmitPointerWithAlignment(const Expr *Addr,
- AlignmentSource *Source = nullptr);
+ LValueBaseInfo *BaseInfo = nullptr);
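The placement-new scenario from the comment above, as a small standalone sketch (illustrative only):
#include <new>
// Sketch: an obviously under-aligned object; per the guarantee described
// above, callers of EmitPointerWithAlignment should still not miscompile it.
struct Vec4 { float v[4]; };
void constructInLocal() {
  char local[sizeof(Vec4)];
  new (local) Vec4{};  // placement new into the address of a local variable
}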
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
private:
QualType getVarArgType(const Expr *Arg);
- const TargetCodeGenInfo &getTargetHooks() const {
- return CGM.getTargetCodeGenInfo();
- }
-
void EmitDeclMetadata();
BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,