Diffstat (limited to 'lib/CodeGen/CGStmt.cpp')
-rw-r--r-- | lib/CodeGen/CGStmt.cpp | 94
1 file changed, 68 insertions, 26 deletions
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 3548dba..3153ca8 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -11,17 +11,17 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "CodeGenFunction.h"
 #include "CGDebugInfo.h"
 #include "CodeGenModule.h"
-#include "CodeGenFunction.h"
 #include "TargetInfo.h"
 #include "clang/AST/StmtVisitor.h"
 #include "clang/Basic/PrettyStackTrace.h"
 #include "clang/Basic/TargetInfo.h"
 #include "llvm/ADT/StringExtras.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/DataLayout.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Intrinsics.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -198,6 +198,12 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
   // Keep track of the current cleanup stack depth, including debug scopes.
   LexicalScope Scope(*this, S.getSourceRange());
 
+  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
+}
+
+RValue CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast,
+                                                     AggValueSlot AggSlot) {
+
   for (CompoundStmt::const_body_iterator I = S.body_begin(),
        E = S.body_end()-GetLast; I != E; ++I)
     EmitStmt(*I);
@@ -313,6 +319,12 @@ CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
 }
 
 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+  // Add this label to the current lexical scope if we're within any
+  // normal cleanups.  Jumps "in" to this label --- when permitted by
+  // the language --- may need to be routed around such cleanups.
+  if (EHStack.hasNormalCleanups() && CurLexicalScope)
+    CurLexicalScope->addLabel(D);
+
   JumpDest &Dest = LabelMap[D];
 
   // If we didn't need a forward reference to this label, just go
@@ -324,16 +336,36 @@ void CodeGenFunction::EmitLabel(const LabelDecl *D) {
   // it from the branch-fixups list.
   } else {
     assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
-    Dest = JumpDest(Dest.getBlock(),
-                    EHStack.stable_begin(),
-                    Dest.getDestIndex());
-
+    Dest.setScopeDepth(EHStack.stable_begin());
     ResolveBranchFixups(Dest.getBlock());
   }
 
   EmitBlock(Dest.getBlock());
 }
 
+/// Change the cleanup scope of the labels in this lexical scope to
+/// match the scope of the enclosing context.
+void CodeGenFunction::LexicalScope::rescopeLabels() {
+  assert(!Labels.empty());
+  EHScopeStack::stable_iterator innermostScope
+    = CGF.EHStack.getInnermostNormalCleanup();
+
+  // Change the scope depth of all the labels.
+  for (SmallVectorImpl<const LabelDecl*>::const_iterator
+         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
+    assert(CGF.LabelMap.count(*i));
+    JumpDest &dest = CGF.LabelMap.find(*i)->second;
+    assert(dest.getScopeDepth().isValid());
+    assert(innermostScope.encloses(dest.getScopeDepth()));
+    dest.setScopeDepth(innermostScope);
+  }
+
+  // Reparent the labels if the new scope also has cleanups.
+  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
+    ParentScope->Labels.append(Labels.begin(), Labels.end());
+  }
+}
+
 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
   EmitLabel(S.getDecl());
@@ -735,7 +767,9 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
   } else if (RV.isAggregate()) {
     EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
   } else {
-    StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+    EmitStoreOfComplex(RV.getComplexVal(),
+                       MakeNaturalAlignAddrLValue(ReturnValue, Ty),
+                       /*init*/ true);
   }
   EmitBranchThroughCleanup(ReturnBlock);
 }
@@ -760,8 +794,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
 
   // FIXME: Clean this up by using an LValue for ReturnTemp,
   // EmitStoreThroughLValue, and EmitAnyExpr.
-  if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
-      !Target.useGlobalsForAutomaticVariables()) {
+  if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
     // Apply the named return value optimization for this return statement,
     // which means doing nothing: the appropriate result has already been
     // constructed into the NRVO variable.
@@ -782,16 +815,26 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
     // rather than the value.
     RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
     Builder.CreateStore(Result.getScalarVal(), ReturnValue);
-  } else if (!hasAggregateLLVMType(RV->getType())) {
-    Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
-  } else if (RV->getType()->isAnyComplexType()) {
-    EmitComplexExprIntoAddr(RV, ReturnValue, false);
   } else {
-    CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
-    EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
-                                          AggValueSlot::IsDestructed,
-                                          AggValueSlot::DoesNotNeedGCBarriers,
-                                          AggValueSlot::IsNotAliased));
+    switch (getEvaluationKind(RV->getType())) {
+    case TEK_Scalar:
+      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+      break;
+    case TEK_Complex:
+      EmitComplexExprIntoLValue(RV,
+                     MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
+                                /*isInit*/ true);
+      break;
+    case TEK_Aggregate: {
+      CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
+      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
+                                            Qualifiers(),
+                                            AggValueSlot::IsDestructed,
+                                            AggValueSlot::DoesNotNeedGCBarriers,
+                                            AggValueSlot::IsNotAliased));
+      break;
+    }
+    }
   }
 
   cleanupScope.ForceCleanup();
@@ -1349,7 +1392,7 @@ CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                     std::string &ConstraintStr) {
   llvm::Value *Arg;
   if (Info.allowsRegister() || !Info.allowsMemory()) {
-    if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
+    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
       Arg = EmitLoadOfLValue(InputValue).getScalarVal();
     } else {
       llvm::Type *Ty = ConvertType(InputType);
@@ -1378,7 +1421,7 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
                                            const Expr *InputExpr,
                                            std::string &ConstraintStr) {
   if (Info.allowsRegister() || !Info.allowsMemory())
-    if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType()))
+    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
       return EmitScalarExpr(InputExpr);
 
   InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
@@ -1473,7 +1516,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
 
     // If this is a register output, then make the inline asm return it
     // by-value.  If this is a memory result, return the value by-reference.
-    if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+    if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
       Constraints += "=" + OutputConstraint;
       ResultRegQualTys.push_back(OutExpr->getType());
       ResultRegDests.push_back(Dest);
@@ -1640,9 +1683,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
     llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                          /* IsAlignStack */ false, AsmDialect);
   llvm::CallInst *Result = Builder.CreateCall(IA, Args);
-  Result->addAttribute(llvm::AttrListPtr::FunctionIndex,
-                       llvm::Attributes::get(getLLVMContext(),
-                                             llvm::Attributes::NoUnwind));
+  Result->addAttribute(llvm::AttributeSet::FunctionIndex,
+                       llvm::Attribute::NoUnwind);
 
   // Slap the source location of the inline asm into a !srcloc metadata on the
   // call.  FIXME: Handle metadata for MS-style inline asms.
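Note: the sketch below is a minimal, self-contained illustration of the evaluation-kind dispatch that the hunks in EmitReturnStmt and the inline-asm paths switch over to; it is not Clang code. The TEK_* names, getEvaluationKind, and hasScalarEvaluationKind come from the diff above, while TypeInfo, the classifier body, and describeReturnLowering are hypothetical stand-ins invented for the example.

// Minimal standalone sketch (assumptions noted above) of the three-way
// evaluation-kind dispatch adopted by this patch.
#include <iostream>
#include <string>

enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate };

// Hypothetical stand-in for a Clang QualType: just enough state to classify.
struct TypeInfo {
  bool isAnyComplex;     // _Complex float, _Complex double, ...
  bool isRecordOrArray;  // structs, unions, arrays
};

// Hypothetical classifier playing the role of CodeGenFunction::getEvaluationKind.
TypeEvaluationKind getEvaluationKind(const TypeInfo &T) {
  if (T.isAnyComplex)    return TEK_Complex;
  if (T.isRecordOrArray) return TEK_Aggregate;
  return TEK_Scalar;
}

// One exhaustive switch replaces the old chain of boolean tests
// (hasAggregateLLVMType plus isAnyComplexType).
std::string describeReturnLowering(const TypeInfo &T) {
  switch (getEvaluationKind(T)) {
  case TEK_Scalar:    return "store the scalar result into the return slot";
  case TEK_Complex:   return "store real/imag parts with a complex store";
  case TEK_Aggregate: return "evaluate the expression directly into the slot";
  }
  return "unreachable";
}

int main() {
  std::cout << describeReturnLowering({false, false}) << "\n";  // scalar
  std::cout << describeReturnLowering({true,  false}) << "\n";  // complex
  std::cout << describeReturnLowering({false, true})  << "\n";  // aggregate
  return 0;
}

The design point is that the three lowering strategies become an explicit, exhaustive switch rather than two unrelated boolean predicates, so an unhandled kind shows up as a compiler warning instead of a silently wrong else branch.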