Diffstat (limited to 'lib/CodeGen/CGStmt.cpp')
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 228
1 file changed, 165 insertions(+), 63 deletions(-)
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 16145f7..cd23811 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
@@ -69,24 +70,54 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
EmitStopPoint(S);
switch (S->getStmtClass()) {
- default:
- // Must be an expression in a stmt context. Emit the value (to get
- // side-effects) and ignore the result.
- if (!isa<Expr>(S))
- ErrorUnsupported(S, "statement");
-
- EmitAnyExpr(cast<Expr>(S), 0, false, true);
-
- // Expression emitters don't handle unreachable blocks yet, so look for one
- // explicitly here. This handles the common case of a call to a noreturn
- // function.
- if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty()) {
- CurBB->eraseFromParent();
- Builder.ClearInsertionPoint();
- }
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ // Remember the block we came in on.
+ llvm::BasicBlock *incoming = Builder.GetInsertBlock();
+ assert(incoming && "expression emission must have an insertion point");
+
+ EmitIgnoredExpr(cast<Expr>(S));
+
+ llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
+ assert(outgoing && "expression emission cleared block!");
+
+ // The expression emitters assume (reasonably!) that the insertion
+ // point is always set. To maintain that, the call-emission code
+ // for noreturn functions has to enter a new block with no
+ // predecessors. We want to kill that block and mark the current
+ // insertion point unreachable in the common case of a call like
+ // "exit();". Since expression emission doesn't otherwise create
+ // blocks with no predecessors, we can just test for that.
+ // However, we must be careful not to do this to our incoming
+ // block, because *statement* emission does sometimes create
+ // reachable blocks which will have no predecessors until later in
+ // the function. This occurs with, e.g., labels that are not
+ // reachable by fallthrough.
+ if (incoming != outgoing && outgoing->use_empty()) {
+ outgoing->eraseFromParent();
+ Builder.ClearInsertionPoint();
}
break;
+ }
+
case Stmt::IndirectGotoStmtClass:
EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
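
A hedged source-level illustration of the statement the dead-block pruning above is aimed at; the function and names below are illustrative only, not part of the patch:

    #include <cstdlib>

    void fail_fast(bool broken) {
      if (broken)
        std::exit(1);   // call emission for the noreturn callee leaves the
                        // builder in a fresh block with no predecessors; the
                        // code above erases that block and clears the insertion
                        // point rather than keeping unreachable IR around.
    }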
@@ -146,7 +177,7 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
- llvm::Value *AggLoc, bool isAggVol) {
+ AggValueSlot AggSlot) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
@@ -178,13 +209,13 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
// emitting them before we evaluate the subexpr.
const Stmt *LastStmt = S.body_back();
while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
- EmitLabel(*LS);
+ EmitLabel(LS->getDecl());
LastStmt = LS->getSubStmt();
}
EnsureInsertPoint();
- RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
}
return RV;
@@ -246,24 +277,24 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
}
CodeGenFunction::JumpDest
-CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
- JumpDest &Dest = LabelMap[S];
+CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
if (Dest.isValid()) return Dest;
// Create, but don't insert, the new block.
- Dest = JumpDest(createBasicBlock(S->getName()),
+ Dest = JumpDest(createBasicBlock(D->getName()),
EHScopeStack::stable_iterator::invalid(),
NextCleanupDestIndex++);
return Dest;
}
-void CodeGenFunction::EmitLabel(const LabelStmt &S) {
- JumpDest &Dest = LabelMap[&S];
+void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
// If we didn't need a forward reference to this label, just go
// ahead and create a destination at the current scope.
if (!Dest.isValid()) {
- Dest = getJumpDestInCurrentScope(S.getName());
+ Dest = getJumpDestInCurrentScope(D->getName());
// Otherwise, we need to give this label a target depth and remove
// it from the branch-fixups list.
@@ -281,7 +312,7 @@ void CodeGenFunction::EmitLabel(const LabelStmt &S) {
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
- EmitLabel(S);
+ EmitLabel(S.getDecl());
EmitStmt(S.getSubStmt());
}
@@ -297,10 +328,14 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ if (const LabelDecl *Target = S.getConstantTarget()) {
+ EmitBranchThroughCleanup(getJumpDestForLabel(Target));
+ return;
+ }
+
// Ensure that we have an i8* for our PHI node.
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
- llvm::Type::getInt8PtrTy(VMContext),
- "addr");
+ Int8PtrTy, "addr");
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
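
The new early return fires when the indirect goto's operand folds to a single known label. A hedged sketch using the GNU computed-goto extension; names are for exposition only:

    int pick(int x) {
      void *dest = &&done;   // the operand is a constant address-of-label,
      goto *dest;            // so getConstantTarget() yields the LabelDecl and a
                             // plain branch-through-cleanup is emitted instead of
                             // an indirect branch feeding a PHI node.
    done:
      return x;
    }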
@@ -320,7 +355,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
@@ -395,7 +430,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
@@ -527,7 +562,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// declaration.
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (S.getConditionVariable()) {
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
}
// If there are any cleanups between here and the loop-exit scope,
@@ -621,11 +656,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// If there is an NRVO flag for this variable, set it to 1 to indicate
// that the cleanup code should not destroy the variable.
- if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
- const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
- llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
- Builder.CreateStore(One, NRVOFlag);
- }
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+ Builder.CreateStore(Builder.getTrue(), NRVOFlag);
} else if (!ReturnValue) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
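
A hedged sketch of the source shape the NRVO flag guards (illustrative only): the named return value is constructed directly in the return slot, and the store of true above tells the variable's cleanup not to destroy the object that is being returned.

    struct Big { ~Big() {} int data[64]; };

    Big make(bool early) {
      Big b;          // constructed straight into the caller's return slot (NRVO)
      if (early)
        return b;     // sets the NRVO flag, so the cleanup for 'b' skips the
                      // destructor on this path instead of destroying the result
      return b;
    }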
@@ -643,7 +675,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, ReturnValue, false);
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, false, true));
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -713,7 +745,8 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
// Range is small enough to add multiple switch instruction cases.
for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), LHS),
+ CaseDest);
LHS++;
}
return;
@@ -735,10 +768,10 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
// Emit range check.
llvm::Value *Diff =
Builder.CreateSub(SwitchInsn->getCondition(),
- llvm::ConstantInt::get(VMContext, LHS), "tmp");
+ llvm::ConstantInt::get(getLLVMContext(), LHS), "tmp");
llvm::Value *Cond =
- Builder.CreateICmpULE(Diff,
- llvm::ConstantInt::get(VMContext, Range), "tmp");
+ Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(getLLVMContext(), Range),
+ "inbounds");
Builder.CreateCondBr(Cond, CaseDest, FalseDest);
// Restore the appropriate insertion point.
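
The subtract-and-compare above is the usual unsigned trick for a wide GNU case range; small ranges (difference below 64) are expanded case by case instead. A hedged worked example, with hypothetical values standing in for LHS and RHS:

    // Equivalent of the check emitted for "case 1000 ... 1999:":
    bool in_case_range(unsigned cond) {
      const unsigned lhs = 1000, range = 999;   // range = RHS - LHS
      return (cond - lhs) <= range;             // one sub plus one unsigned compare,
                                                // matching CreateSub / CreateICmpULE
    }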
@@ -757,7 +790,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
+ CaseDest);
// Recursively emitting the statement is acceptable, but is not wonderful for
// code where we have many case statements nested together, i.e.:
@@ -775,7 +809,8 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
while (NextCase && NextCase->getRHS() == 0) {
CurCase = NextCase;
CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
- SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+ SwitchInsn->addCase(llvm::ConstantInt::get(getLLVMContext(), CaseVal),
+ CaseDest);
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
@@ -798,7 +833,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitLocalBlockVarDecl(*S.getConditionVariable());
+ EmitAutoVarDecl(*S.getConditionVariable());
llvm::Value *CondV = EmitScalarExpr(S.getCond());
@@ -861,14 +896,11 @@ static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
std::string Result;
- std::string tmp;
while (*Constraint) {
switch (*Constraint) {
default:
- tmp = Target.convertConstraint(*Constraint);
- if (Result.find(tmp) == std::string::npos) // Combine unique constraints
- Result += tmp;
+ Result += Target.convertConstraint(*Constraint);
break;
// Ignore these
case '*':
@@ -877,8 +909,8 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
case '=': // Will see this and the following in multi-alt constraints.
case '+':
break;
- case ',': // FIXME - Until the back-end properly supports
- return Result; // multiple alternative constraints, we stop here.
+ case ',':
+ Result += "|";
break;
case 'g':
Result += "imr";
@@ -890,7 +922,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
bool result = Target.resolveSymbolicName(Constraint,
&(*OutCons)[0],
OutCons->size(), Index);
- assert(result && "Could not resolve symbolic name"); result=result;
+ assert(result && "Could not resolve symbolic name"); (void)result;
Result += llvm::utostr(Index);
break;
}
@@ -902,6 +934,33 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
return Result;
}
+/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, CodeGenModule &CGM,
+ const AsmStmt &Stmt) {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint;
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint;
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint;
+ llvm::StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // FIXME: We should check which registers are compatible with "r" or "x".
+ if (Constraint != "r" && Constraint != "x") {
+ CGM.ErrorUnsupported(&Stmt, "__asm__");
+ return Constraint;
+ }
+ return "{" + Register.str() + "}";
+}
+
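+// A hedged illustration of the operands AddVariableConstraints rewrites: a GNU
+// explicit-register variable used under a generic "r" constraint. The register
+// and names below are chosen purely for exposition:
+//
+//   void bump() {
+//     register int counter asm("eax") = 0;   // explicit register variable
+//     __asm__("incl %0" : "+r"(counter));    // the "r" is replaced by "{eax}" in
+//                                            // the LLVM constraint string, pinning
+//                                            // the operand to that register
+//   }
+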
llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
const TargetInfo::ConstraintInfo &Info,
@@ -915,7 +974,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
const llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
- Ty = llvm::IntegerType::get(VMContext, Size);
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);
Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
@@ -946,6 +1005,35 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
}
+/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
+/// asm call instruction. The !srcloc MDNode contains a list of constant
+/// integers which are the source locations of the start of each line in the
+/// asm.
+static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
+ CodeGenFunction &CGF) {
+ llvm::SmallVector<llvm::Value *, 8> Locs;
+ // Add the location of the first line to the MDNode.
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ Str->getLocStart().getRawEncoding()));
+ llvm::StringRef StrVal = Str->getString();
+ if (!StrVal.empty()) {
+ const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
+ const LangOptions &LangOpts = CGF.CGM.getLangOptions();
+
+ // Add the location of the start of each subsequent line of the asm to the
+ // MDNode.
+ for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
+ if (StrVal[i] != '\n') continue;
+ SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
+ CGF.Target);
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ LineLoc.getRawEncoding()));
+ }
+ }
+
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs.data(), Locs.size());
+}
+
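+// A hedged example of the asm strings this helps with: a literal spanning
+// several lines. Recording one i32 per line start lets a backend diagnostic
+// against, say, line 2 of the asm be mapped back to the right source line:
+//
+//   void spin() {
+//     __asm__("1:\n"
+//             "decl %%ecx\n"      // a diagnostic here now points at this line
+//             "jnz 1b\n"
+//             : : : "ecx", "cc");
+//   }
+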
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Analyze the asm string to decompose it into its pieces. We know that Sema
// has already done this, so it is guaranteed to be successful.
@@ -1010,6 +1098,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, Target,
+ CGM, S);
+
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
Constraints += ',';
@@ -1044,6 +1135,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegTypes.back() = ConvertType(InputTy);
}
}
+ if (const llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ ResultRegTypes.back()))
+ ResultRegTypes.back() = AdjTy;
} else {
ArgTypes.push_back(Dest.getAddress()->getType());
Args.push_back(Dest.getAddress());
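
The new target hook lets a backend substitute the register-file type it expects for a given constraint before the result types are finalized. A hedged, x86-flavoured guess at the kind of operand involved (an MMX-class constraint); the exact adjustment is entirely up to the target's adjustInlineAsmType implementation:

    typedef int v2si __attribute__((vector_size(8)));

    v2si add2(v2si a, v2si b) {
      v2si r;
      __asm__("paddd %2, %0" : "=y"(r) : "0"(a), "y"(b));  // "y" = MMX register class
      return r;
    }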
@@ -1083,6 +1178,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
&OutputConstraintInfos);
+ InputConstraint =
+ AddVariableConstraints(InputConstraint,
+ *InputExpr->IgnoreParenNoopCasts(getContext()),
+ Target, CGM, S);
+
llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
@@ -1107,7 +1207,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg = Builder.CreateFPExt(Arg, OutputTy);
}
}
-
+ if (const llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
@@ -1145,11 +1248,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const llvm::Type *ResultType;
if (ResultRegTypes.empty())
- ResultType = llvm::Type::getVoidTy(VMContext);
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
else if (ResultRegTypes.size() == 1)
ResultType = ResultRegTypes[0];
else
- ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
+ ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
const llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
@@ -1162,10 +1265,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Slap the source location of the inline asm into a !srcloc metadata on the
// call.
- unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
- llvm::Value *LocIDC =
- llvm::ConstantInt::get(Int32Ty, LocID);
- Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
@@ -1192,16 +1292,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
- (unsigned)ResSize));
+ Tmp = Builder.CreateTrunc(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
} else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
- Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
- (unsigned)TmpSize));
+ Tmp = Builder.CreatePtrToInt(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
} else if (TruncTy->isIntegerTy()) {
Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
}
}
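
The added vector case covers asm results that come back as the (possibly adjusted) register type and must be bit-cast to the declared vector type of the C-level operand. A hedged, SSE-flavoured example (illustrative only):

    typedef float v4sf __attribute__((vector_size(16)));

    v4sf zero_xmm() {
      v4sf r;
      __asm__("xorps %0, %0" : "=x"(r));   // the result lives in an XMM register;
                                           // the value is bit-cast back to v4sf
      return r;
    }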