Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h | 32
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 32
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 41
-rw-r--r--  lib/CodeGen/CGBuilder.h | 2
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 2040
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 4
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp | 71
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 51
-rw-r--r--  lib/CodeGen/CGCall.cpp | 198
-rw-r--r--  lib/CodeGen/CGClass.cpp | 202
-rw-r--r--  lib/CodeGen/CGCleanup.cpp | 8
-rw-r--r--  lib/CodeGen/CGCleanup.h | 2
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 228
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 5
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 48
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 80
-rw-r--r--  lib/CodeGen/CGException.cpp | 80
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 239
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 253
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 136
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 18
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 30
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 192
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 97
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 75
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp | 17
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 20
-rw-r--r--  lib/CodeGen/CGRTTI.cpp | 3
-rw-r--r--  lib/CodeGen/CGRecordLayout.h | 7
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 16
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 94
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 23
-rw-r--r--  lib/CodeGen/CGValue.h | 9
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 20
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 136
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 47
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 146
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 15
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp | 8
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h | 6
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 4
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 36
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 257
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp | 98
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 383
45 files changed, 2226 insertions(+), 3283 deletions(-)
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 2853bc8..86f5380 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -74,31 +74,42 @@ namespace clang {
unsigned UIntData;
bool BoolData0;
bool BoolData1;
+ bool InReg;
- ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0,
- bool B0 = false, bool B1 = false, llvm::Type* P = 0)
+ ABIArgInfo(Kind K, llvm::Type *TD, unsigned UI, bool B0, bool B1, bool IR,
+ llvm::Type* P)
: TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
- BoolData1(B1) {}
+ BoolData1(B1), InReg(IR) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
llvm::Type *Padding = 0) {
- return ABIArgInfo(Direct, T, Offset, false, false, Padding);
+ return ABIArgInfo(Direct, T, Offset, false, false, false, Padding);
+ }
+ static ABIArgInfo getDirectInReg(llvm::Type *T) {
+ return ABIArgInfo(Direct, T, 0, false, false, true, 0);
}
static ABIArgInfo getExtend(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0);
+ return ABIArgInfo(Extend, T, 0, false, false, false, 0);
+ }
+ static ABIArgInfo getExtendInReg(llvm::Type *T = 0) {
+ return ABIArgInfo(Extend, T, 0, false, false, true, 0);
}
static ABIArgInfo getIgnore() {
- return ABIArgInfo(Ignore);
+ return ABIArgInfo(Ignore, 0, 0, false, false, false, 0);
}
static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true
, bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign);
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, 0);
+ }
+ static ABIArgInfo getIndirectInReg(unsigned Alignment, bool ByVal = true
+ , bool Realign = false) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, 0);
}
static ABIArgInfo getExpand() {
- return ABIArgInfo(Expand);
+ return ABIArgInfo(Expand, 0, 0, false, false, false, 0);
}
Kind getKind() const { return TheKind; }
@@ -132,6 +143,11 @@ namespace clang {
TypeData = T;
}
+ bool getInReg() const {
+ assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
+ return InReg;
+ }
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
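Note: the InReg bit added above extends the existing Direct/Extend/Indirect kinds with a "pass in register" hint for targets whose calling convention supports it. A minimal sketch of how a target classifier might pick the new factories; the FreeRegs parameter and the register-counting heuristic are illustrative assumptions, not part of this patch:

    // Sketch only: choosing the new *InReg variants in a target ABIInfo.
    ABIArgInfo classifyArgumentType(QualType Ty, unsigned &FreeRegs) const {
      if (FreeRegs == 0)
        return ABIArgInfo::getDirect();        // no registers left: stack ABI
      --FreeRegs;                              // consume one argument register
      if (Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtendInReg();   // extend, then mark inreg
      return ABIArgInfo::getDirectInReg(0);    // pass directly, in a register
    }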
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 2f44711..0a1915b 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -121,6 +121,12 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase
PM.add(createObjCARCOptPass());
}
+static unsigned BoundsChecking;
+static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createBoundsCheckingPass(BoundsChecking));
+}
+
static void addAddressSanitizerPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
PM.add(createAddressSanitizerPass());
@@ -160,6 +166,14 @@ void EmitAssemblyHelper::CreatePasses() {
addObjCARCOptPass);
}
+ if (CodeGenOpts.BoundsChecking > 0) {
+ BoundsChecking = CodeGenOpts.BoundsChecking;
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addBoundsCheckingPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addBoundsCheckingPass);
+ }
+
if (LangOpts.AddressSanitizer) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addAddressSanitizerPass);
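Note: registering addBoundsCheckingPass under both EP_ScalarOptimizerLate and EP_EnabledOnOptLevel0 makes the instrumentation run whether or not optimization is enabled. At the source level the injected guards amount to something like the following sketch; the trap-on-failure shape is an assumption about the pass's effect, not its literal IR output:

    // Illustrative view of a bounds-checked access; the real pass works on
    // LLVM IR using object-size information.
    #include <stddef.h>
    char buf[16];
    char load_checked(size_t i) {
      if (i >= sizeof buf)    // guard inserted before the access
        __builtin_trap();     // out-of-bounds accesses trap
      return buf[i];
    }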
@@ -219,7 +233,7 @@ void EmitAssemblyHelper::CreatePasses() {
CodeGenOpts.EmitGcovArcs,
TargetTriple.isMacOSX()));
- if (!CodeGenOpts.DebugInfo)
+ if (CodeGenOpts.DebugInfo == CodeGenOptions::NoDebugInfo)
MPM->add(createStripSymbolsPass(true));
}
@@ -324,6 +338,9 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Options.NoFramePointerElimNonLeaf = true;
}
+ if (CodeGenOpts.UseInitArray)
+ Options.UseInitArray = true;
+
// Set float ABI type.
if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
Options.FloatABIType = llvm::FloatABI::Soft;
@@ -334,6 +351,19 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Options.FloatABIType = llvm::FloatABI::Default;
}
+ // Set FP fusion mode.
+ switch (LangOpts.getFPContractMode()) {
+ case LangOptions::FPC_Off:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
+ break;
+ case LangOptions::FPC_On:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
+ break;
+ case LangOptions::FPC_Fast:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
+ break;
+ }
+
Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
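Note: the three FPOpFusion settings correspond to the -ffp-contract=off/on/fast language modes. What is at stake is whether a separate multiply and add may be contracted into one fused operation, as in this sketch:

    // Under FPOpFusion::Fast the backend may turn this into a single fused
    // multiply-add (one rounding); under Strict it must keep two roundings.
    double muladd(double a, double b, double c) {
      return a * b + c;       // contraction candidate
    }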
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index f8c7bcd..37ef4af 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -458,19 +458,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
}
}
+ assert(endAlign == getLowBit(blockSize));
+
// At this point, we just have to add padding if the end align still
// isn't aligned right.
if (endAlign < maxFieldAlign) {
- CharUnits padding = maxFieldAlign - endAlign;
+ CharUnits newBlockSize = blockSize.RoundUpToAlignment(maxFieldAlign);
+ CharUnits padding = newBlockSize - blockSize;
elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
padding.getQuantity()));
- blockSize += padding;
-
- endAlign = getLowBit(blockSize);
- assert(endAlign >= maxFieldAlign);
+ blockSize = newBlockSize;
+ endAlign = getLowBit(blockSize); // might be > maxFieldAlign
}
+ assert(endAlign >= maxFieldAlign);
+ assert(endAlign == getLowBit(blockSize));
+
// Slam everything else on now. This works because they have
// strictly decreasing alignment and we expect that size is always a
// multiple of alignment.
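Note: the rewritten padding computation above rounds the running block size up to the next multiple of maxFieldAlign, rather than padding by maxFieldAlign - endAlign, which could leave the size misaligned when endAlign (the lowest set bit of the size) understated the distance to the next boundary. Equivalent arithmetic, as a standalone sketch with CharUnits elided:

    #include <stdint.h>
    uint64_t roundUpToAlignment(uint64_t size, uint64_t align) {
      return (size + align - 1) / align * align;
    }
    uint64_t blockPadding(uint64_t blockSize, uint64_t maxFieldAlign) {
      // e.g. blockSize = 12, maxFieldAlign = 16: the old formula padded by
      // 16 - lowBit(12) = 12, reaching 24 (misaligned); this pads by 4, to 16.
      return roundUpToAlignment(blockSize, maxFieldAlign) - blockSize;
    }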
@@ -626,7 +630,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Using the computed layout, generate the actual block function.
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
llvm::Constant *blockFn
- = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
+ = CodeGenFunction(CGM, true).GenerateBlockFunction(CurGD, blockInfo,
CurFuncDecl, LocalDeclMap,
isLambdaConv);
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
@@ -694,7 +698,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Compute the address of the thing we're going to move into the
// block literal.
llvm::Value *src;
- if (ci->isNested()) {
+ if (BlockInfo && ci->isNested()) {
// We need to use the capture from the enclosing block.
const CGBlockInfo::Capture &enclosingCapture =
BlockInfo->getCapture(variable);
@@ -872,7 +876,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeFunctionCall(Args, FuncTy);
+ CGM.getTypes().arrangeFreeFunctionCall(Args, FuncTy);
// Cast the function pointer to the right type.
llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
@@ -999,7 +1003,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
// Check if we should generate debug info for this block function.
if (CGM.getModuleDebugInfo())
DebugInfo = CGM.getModuleDebugInfo();
-
+ CurGD = GD;
+
BlockInfo = &blockInfo;
// Arrange for local static and local extern declarations to appear
@@ -1130,15 +1135,17 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const VarDecl *variable = ci->getVariable();
DI->EmitLocation(Builder, variable->getLocation());
- const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- if (capture.isConstant()) {
- DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
- Builder);
- continue;
- }
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ Builder);
+ continue;
+ }
- DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
- Builder, blockInfo);
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
+ Builder, blockInfo);
+ }
}
}
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index 8120217..a790a74 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -10,7 +10,7 @@
#ifndef CLANG_CODEGEN_CGBUILDER_H
#define CLANG_CODEGEN_CGBUILDER_H
-#include "llvm/Support/IRBuilder.h"
+#include "llvm/IRBuilder.h"
namespace clang {
namespace CodeGen {
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index e30b513..65c782e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -335,6 +335,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F, ArgValue));
}
case Builtin::BI__builtin_object_size: {
+ // We rely on constant folding to deal with expressions with side effects.
+ assert(!E->getArg(0)->HasSideEffects(getContext()) &&
+ "should have been constant folded");
+
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
llvm::Type *ResType = ConvertType(E->getType());
@@ -348,9 +352,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
- return RValue::get(Builder.CreateCall2(F,
- EmitScalarExpr(E->getArg(0)),
- CI));
+ return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
@@ -363,6 +365,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
}
+ case Builtin::BI__builtin_readcyclecounter: {
+ Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
+ return RValue::get(Builder.CreateCall(F));
+ }
case Builtin::BI__builtin_trap: {
Value *F = CGM.getIntrinsic(Intrinsic::trap);
return RValue::get(Builder.CreateCall(F));
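Note: __builtin_readcyclecounter maps directly onto the @llvm.readcyclecounter intrinsic and yields the processor's cycle counter as an unsigned 64-bit value. Typical source-level usage, as a sketch:

    // Rough cycle timing of a region; the counter's rate and monotonicity
    // are target-dependent.
    unsigned long long time_region(void (*fn)(void)) {
      unsigned long long begin = __builtin_readcyclecounter();
      fn();
      return __builtin_readcyclecounter() - begin;
    }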
@@ -982,9 +988,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
getContext().VoidPtrTy);
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
- FunctionType::ExtInfo(),
- RequiredArgs::All);
+ CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
@@ -1376,8 +1382,6 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
return EmitPPCBuiltinExpr(BuiltinID, E);
- case llvm::Triple::hexagon:
- return EmitHexagonBuiltinExpr(BuiltinID, E);
default:
return 0;
}
@@ -1629,13 +1633,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vclz_v:
case ARM::BI__builtin_neon_vclzq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
+ // Generate target-independent intrinsic; also need to add second argument
+ // for whether or not clz of zero is undefined; on ARM it isn't.
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
+ Ops.push_back(Builder.getInt1(Target.isCLZForZeroUndef()));
return EmitNeonCall(F, Ops, "vclz");
}
case ARM::BI__builtin_neon_vcnt_v:
case ARM::BI__builtin_neon_vcntq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
- return EmitNeonCall(F, Ops, "vcnt");
+ // generate target-independent intrinsic
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
+ return EmitNeonCall(F, Ops, "vctpop");
}
case ARM::BI__builtin_neon_vcvt_f16_v: {
assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
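Note: the extra i1 operand added to llvm.ctlz above records whether a zero input is undefined; ARM's CLZ instruction defines CLZ(0) as the bit width, hence the Target.isCLZForZeroUndef() query. The semantic difference, as a plain C++ sketch outside CodeGen:

    #include <stdint.h>
    uint32_t clz32_arm_semantics(uint32_t x) {
      if (x == 0) return 32;      // defined result, matching an i1 'false' flag
      return __builtin_clz(x);    // __builtin_clz(0) is undefined in general
    }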
@@ -2411,8 +2419,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_movntps:
+ case X86::BI__builtin_ia32_movntps256:
case X86::BI__builtin_ia32_movntpd:
+ case X86::BI__builtin_ia32_movntpd256:
case X86::BI__builtin_ia32_movntdq:
+ case X86::BI__builtin_ia32_movntdq256:
case X86::BI__builtin_ia32_movnti: {
llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
Builder.getInt32(1));
@@ -2444,1996 +2455,31 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, name);
}
- }
-}
-
-
-Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- llvm::SmallVector<Value*, 4> Ops;
-
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
-
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
- switch (BuiltinID) {
- default: return 0;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpeq:
- ID = Intrinsic::hexagon_C2_cmpeq; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgt:
- ID = Intrinsic::hexagon_C2_cmpgt; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtu:
- ID = Intrinsic::hexagon_C2_cmpgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpeqp:
- ID = Intrinsic::hexagon_C2_cmpeqp; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtp:
- ID = Intrinsic::hexagon_C2_cmpgtp; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtup:
- ID = Intrinsic::hexagon_C2_cmpgtup; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_bitsset:
- ID = Intrinsic::hexagon_C2_bitsset; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_bitsclr:
- ID = Intrinsic::hexagon_C2_bitsclr; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpeqi:
- ID = Intrinsic::hexagon_C2_cmpeqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgti:
- ID = Intrinsic::hexagon_C2_cmpgti; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgtui:
- ID = Intrinsic::hexagon_C2_cmpgtui; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgei:
- ID = Intrinsic::hexagon_C2_cmpgei; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpgeui:
- ID = Intrinsic::hexagon_C2_cmpgeui; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmplt:
- ID = Intrinsic::hexagon_C2_cmplt; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_cmpltu:
- ID = Intrinsic::hexagon_C2_cmpltu; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_bitsclri:
- ID = Intrinsic::hexagon_C2_bitsclri; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_and:
- ID = Intrinsic::hexagon_C2_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_or:
- ID = Intrinsic::hexagon_C2_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_xor:
- ID = Intrinsic::hexagon_C2_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_andn:
- ID = Intrinsic::hexagon_C2_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_not:
- ID = Intrinsic::hexagon_C2_not; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_orn:
- ID = Intrinsic::hexagon_C2_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_pxfer_map:
- ID = Intrinsic::hexagon_C2_pxfer_map; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_any8:
- ID = Intrinsic::hexagon_C2_any8; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_all8:
- ID = Intrinsic::hexagon_C2_all8; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_vitpack:
- ID = Intrinsic::hexagon_C2_vitpack; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_mux:
- ID = Intrinsic::hexagon_C2_mux; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_muxii:
- ID = Intrinsic::hexagon_C2_muxii; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_muxir:
- ID = Intrinsic::hexagon_C2_muxir; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_muxri:
- ID = Intrinsic::hexagon_C2_muxri; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_vmux:
- ID = Intrinsic::hexagon_C2_vmux; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_mask:
- ID = Intrinsic::hexagon_C2_mask; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpbeq:
- ID = Intrinsic::hexagon_A2_vcmpbeq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpbgtu:
- ID = Intrinsic::hexagon_A2_vcmpbgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpheq:
- ID = Intrinsic::hexagon_A2_vcmpheq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmphgt:
- ID = Intrinsic::hexagon_A2_vcmphgt; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmphgtu:
- ID = Intrinsic::hexagon_A2_vcmphgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpweq:
- ID = Intrinsic::hexagon_A2_vcmpweq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgt:
- ID = Intrinsic::hexagon_A2_vcmpwgt; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgtu:
- ID = Intrinsic::hexagon_A2_vcmpwgtu; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_tfrpr:
- ID = Intrinsic::hexagon_C2_tfrpr; break;
-
- case Hexagon::BI__builtin_HEXAGON_C2_tfrrp:
- ID = Intrinsic::hexagon_C2_tfrrp; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyu_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyu_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyu_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyu_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyu_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyu_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_hh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_hh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s0:
- ID = Intrinsic::hexagon_M2_mpyud_hl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s1:
- ID = Intrinsic::hexagon_M2_mpyud_hl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s0:
- ID = Intrinsic::hexagon_M2_mpyud_lh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s1:
- ID = Intrinsic::hexagon_M2_mpyud_lh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s0:
- ID = Intrinsic::hexagon_M2_mpyud_ll_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s1:
- ID = Intrinsic::hexagon_M2_mpyud_ll_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpysmi:
- ID = Intrinsic::hexagon_M2_mpysmi; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_macsip:
- ID = Intrinsic::hexagon_M2_macsip; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_macsin:
- ID = Intrinsic::hexagon_M2_macsin; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_acc_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_acc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_nac_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_nac_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_s0:
- ID = Intrinsic::hexagon_M2_dpmpyuu_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_acc_s0:
- ID = Intrinsic::hexagon_M2_dpmpyuu_acc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_nac_s0:
- ID = Intrinsic::hexagon_M2_dpmpyuu_nac_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpy_up:
- ID = Intrinsic::hexagon_M2_mpy_up; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyu_up:
- ID = Intrinsic::hexagon_M2_mpyu_up; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_rnd_s0:
- ID = Intrinsic::hexagon_M2_dpmpyss_rnd_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyi:
- ID = Intrinsic::hexagon_M2_mpyi; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mpyui:
- ID = Intrinsic::hexagon_M2_mpyui; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_maci:
- ID = Intrinsic::hexagon_M2_maci; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_acci:
- ID = Intrinsic::hexagon_M2_acci; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_accii:
- ID = Intrinsic::hexagon_M2_accii; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_nacci:
- ID = Intrinsic::hexagon_M2_nacci; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_naccii:
- ID = Intrinsic::hexagon_M2_naccii; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_subacc:
- ID = Intrinsic::hexagon_M2_subacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0:
- ID = Intrinsic::hexagon_M2_vmpy2s_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1:
- ID = Intrinsic::hexagon_M2_vmpy2s_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s0:
- ID = Intrinsic::hexagon_M2_vmac2s_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s1:
- ID = Intrinsic::hexagon_M2_vmac2s_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0pack:
- ID = Intrinsic::hexagon_M2_vmpy2s_s0pack; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1pack:
- ID = Intrinsic::hexagon_M2_vmpy2s_s1pack; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2:
- ID = Intrinsic::hexagon_M2_vmac2; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s0:
- ID = Intrinsic::hexagon_M2_vmpy2es_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s1:
- ID = Intrinsic::hexagon_M2_vmpy2es_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s0:
- ID = Intrinsic::hexagon_M2_vmac2es_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s1:
- ID = Intrinsic::hexagon_M2_vmac2es_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vmac2es:
- ID = Intrinsic::hexagon_M2_vmac2es; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrmac_s0:
- ID = Intrinsic::hexagon_M2_vrmac_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrmpy_s0:
- ID = Intrinsic::hexagon_M2_vrmpy_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s0:
- ID = Intrinsic::hexagon_M2_vdmpyrs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s1:
- ID = Intrinsic::hexagon_M2_vdmpyrs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s0:
- ID = Intrinsic::hexagon_M2_vdmacs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s1:
- ID = Intrinsic::hexagon_M2_vdmacs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s0:
- ID = Intrinsic::hexagon_M2_vdmpys_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s1:
- ID = Intrinsic::hexagon_M2_vdmpys_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s0:
- ID = Intrinsic::hexagon_M2_cmpyrs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s1:
- ID = Intrinsic::hexagon_M2_cmpyrs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s0:
- ID = Intrinsic::hexagon_M2_cmpyrsc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s1:
- ID = Intrinsic::hexagon_M2_cmpyrsc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s0:
- ID = Intrinsic::hexagon_M2_cmacs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s1:
- ID = Intrinsic::hexagon_M2_cmacs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s0:
- ID = Intrinsic::hexagon_M2_cmacsc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s1:
- ID = Intrinsic::hexagon_M2_cmacsc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s0:
- ID = Intrinsic::hexagon_M2_cmpys_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s1:
- ID = Intrinsic::hexagon_M2_cmpys_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s0:
- ID = Intrinsic::hexagon_M2_cmpysc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s1:
- ID = Intrinsic::hexagon_M2_cmpysc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s0:
- ID = Intrinsic::hexagon_M2_cnacs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s1:
- ID = Intrinsic::hexagon_M2_cnacs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s0:
- ID = Intrinsic::hexagon_M2_cnacsc_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s1:
- ID = Intrinsic::hexagon_M2_cnacsc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1:
- ID = Intrinsic::hexagon_M2_vrcmpys_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_acc_s1:
- ID = Intrinsic::hexagon_M2_vrcmpys_acc_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1rp:
- ID = Intrinsic::hexagon_M2_vrcmpys_s1rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s0:
- ID = Intrinsic::hexagon_M2_mmacls_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s1:
- ID = Intrinsic::hexagon_M2_mmacls_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s0:
- ID = Intrinsic::hexagon_M2_mmachs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s1:
- ID = Intrinsic::hexagon_M2_mmachs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s0:
- ID = Intrinsic::hexagon_M2_mmpyl_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s1:
- ID = Intrinsic::hexagon_M2_mmpyl_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s0:
- ID = Intrinsic::hexagon_M2_mmpyh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s1:
- ID = Intrinsic::hexagon_M2_mmpyh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs0:
- ID = Intrinsic::hexagon_M2_mmacls_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs1:
- ID = Intrinsic::hexagon_M2_mmacls_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs0:
- ID = Intrinsic::hexagon_M2_mmachs_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs1:
- ID = Intrinsic::hexagon_M2_mmachs_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs0:
- ID = Intrinsic::hexagon_M2_mmpyl_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs1:
- ID = Intrinsic::hexagon_M2_mmpyl_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs0:
- ID = Intrinsic::hexagon_M2_mmpyh_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs1:
- ID = Intrinsic::hexagon_M2_mmpyh_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_hmmpyl_rs1:
- ID = Intrinsic::hexagon_M2_hmmpyl_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_hmmpyh_rs1:
- ID = Intrinsic::hexagon_M2_hmmpyh_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s0:
- ID = Intrinsic::hexagon_M2_mmaculs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s1:
- ID = Intrinsic::hexagon_M2_mmaculs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s0:
- ID = Intrinsic::hexagon_M2_mmacuhs_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s1:
- ID = Intrinsic::hexagon_M2_mmacuhs_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s0:
- ID = Intrinsic::hexagon_M2_mmpyul_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s1:
- ID = Intrinsic::hexagon_M2_mmpyul_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s0:
- ID = Intrinsic::hexagon_M2_mmpyuh_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s1:
- ID = Intrinsic::hexagon_M2_mmpyuh_s1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs0:
- ID = Intrinsic::hexagon_M2_mmaculs_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs1:
- ID = Intrinsic::hexagon_M2_mmaculs_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs0:
- ID = Intrinsic::hexagon_M2_mmacuhs_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs1:
- ID = Intrinsic::hexagon_M2_mmacuhs_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs0:
- ID = Intrinsic::hexagon_M2_mmpyul_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs1:
- ID = Intrinsic::hexagon_M2_mmpyul_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs0:
- ID = Intrinsic::hexagon_M2_mmpyuh_rs0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs1:
- ID = Intrinsic::hexagon_M2_mmpyuh_rs1; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0:
- ID = Intrinsic::hexagon_M2_vrcmaci_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0:
- ID = Intrinsic::hexagon_M2_vrcmacr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0c:
- ID = Intrinsic::hexagon_M2_vrcmaci_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0c:
- ID = Intrinsic::hexagon_M2_vrcmacr_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmaci_s0:
- ID = Intrinsic::hexagon_M2_cmaci_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmacr_s0:
- ID = Intrinsic::hexagon_M2_cmacr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0:
- ID = Intrinsic::hexagon_M2_vrcmpyi_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0:
- ID = Intrinsic::hexagon_M2_vrcmpyr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0c:
- ID = Intrinsic::hexagon_M2_vrcmpyi_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0c:
- ID = Intrinsic::hexagon_M2_vrcmpyr_s0c; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyi_s0:
- ID = Intrinsic::hexagon_M2_cmpyi_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_cmpyr_s0:
- ID = Intrinsic::hexagon_M2_cmpyr_s0; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_i:
- ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_r:
- ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_i:
- ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_r:
- ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_i:
- ID = Intrinsic::hexagon_M2_vcmac_s0_sat_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_r:
- ID = Intrinsic::hexagon_M2_vcmac_s0_sat_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vcrotate:
- ID = Intrinsic::hexagon_S2_vcrotate; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_add:
- ID = Intrinsic::hexagon_A2_add; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sub:
- ID = Intrinsic::hexagon_A2_sub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addsat:
- ID = Intrinsic::hexagon_A2_addsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subsat:
- ID = Intrinsic::hexagon_A2_subsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addi:
- ID = Intrinsic::hexagon_A2_addi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_ll:
- ID = Intrinsic::hexagon_A2_addh_l16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_hl:
- ID = Intrinsic::hexagon_A2_addh_l16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_ll:
- ID = Intrinsic::hexagon_A2_addh_l16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_hl:
- ID = Intrinsic::hexagon_A2_addh_l16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_ll:
- ID = Intrinsic::hexagon_A2_subh_l16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_hl:
- ID = Intrinsic::hexagon_A2_subh_l16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_ll:
- ID = Intrinsic::hexagon_A2_subh_l16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_hl:
- ID = Intrinsic::hexagon_A2_subh_l16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_ll:
- ID = Intrinsic::hexagon_A2_addh_h16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_lh:
- ID = Intrinsic::hexagon_A2_addh_h16_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hl:
- ID = Intrinsic::hexagon_A2_addh_h16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hh:
- ID = Intrinsic::hexagon_A2_addh_h16_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_ll:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_lh:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hl:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hh:
- ID = Intrinsic::hexagon_A2_addh_h16_sat_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_ll:
- ID = Intrinsic::hexagon_A2_subh_h16_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_lh:
- ID = Intrinsic::hexagon_A2_subh_h16_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hl:
- ID = Intrinsic::hexagon_A2_subh_h16_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hh:
- ID = Intrinsic::hexagon_A2_subh_h16_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_ll:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_lh:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hl:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hh:
- ID = Intrinsic::hexagon_A2_subh_h16_sat_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_aslh:
- ID = Intrinsic::hexagon_A2_aslh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_asrh:
- ID = Intrinsic::hexagon_A2_asrh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addp:
- ID = Intrinsic::hexagon_A2_addp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addpsat:
- ID = Intrinsic::hexagon_A2_addpsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_addsp:
- ID = Intrinsic::hexagon_A2_addsp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subp:
- ID = Intrinsic::hexagon_A2_subp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_neg:
- ID = Intrinsic::hexagon_A2_neg; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_negsat:
- ID = Intrinsic::hexagon_A2_negsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_abs:
- ID = Intrinsic::hexagon_A2_abs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_abssat:
- ID = Intrinsic::hexagon_A2_abssat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vconj:
- ID = Intrinsic::hexagon_A2_vconj; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_negp:
- ID = Intrinsic::hexagon_A2_negp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_absp:
- ID = Intrinsic::hexagon_A2_absp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_max:
- ID = Intrinsic::hexagon_A2_max; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_maxu:
- ID = Intrinsic::hexagon_A2_maxu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_min:
- ID = Intrinsic::hexagon_A2_min; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_minu:
- ID = Intrinsic::hexagon_A2_minu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_maxp:
- ID = Intrinsic::hexagon_A2_maxp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_maxup:
- ID = Intrinsic::hexagon_A2_maxup; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_minp:
- ID = Intrinsic::hexagon_A2_minp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_minup:
- ID = Intrinsic::hexagon_A2_minup; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfr:
- ID = Intrinsic::hexagon_A2_tfr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrsi:
- ID = Intrinsic::hexagon_A2_tfrsi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrp:
- ID = Intrinsic::hexagon_A2_tfrp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrpi:
- ID = Intrinsic::hexagon_A2_tfrpi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_zxtb:
- ID = Intrinsic::hexagon_A2_zxtb; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sxtb:
- ID = Intrinsic::hexagon_A2_sxtb; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_zxth:
- ID = Intrinsic::hexagon_A2_zxth; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sxth:
- ID = Intrinsic::hexagon_A2_sxth; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combinew:
- ID = Intrinsic::hexagon_A2_combinew; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combineii:
- ID = Intrinsic::hexagon_A2_combineii; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_hh:
- ID = Intrinsic::hexagon_A2_combine_hh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_hl:
- ID = Intrinsic::hexagon_A2_combine_hl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_lh:
- ID = Intrinsic::hexagon_A2_combine_lh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_combine_ll:
- ID = Intrinsic::hexagon_A2_combine_ll; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfril:
- ID = Intrinsic::hexagon_A2_tfril; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_tfrih:
- ID = Intrinsic::hexagon_A2_tfrih; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_and:
- ID = Intrinsic::hexagon_A2_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_or:
- ID = Intrinsic::hexagon_A2_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_xor:
- ID = Intrinsic::hexagon_A2_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_not:
- ID = Intrinsic::hexagon_A2_not; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_xor_xacc:
- ID = Intrinsic::hexagon_M2_xor_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_subri:
- ID = Intrinsic::hexagon_A2_subri; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_andir:
- ID = Intrinsic::hexagon_A2_andir; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_orir:
- ID = Intrinsic::hexagon_A2_orir; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_andp:
- ID = Intrinsic::hexagon_A2_andp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_orp:
- ID = Intrinsic::hexagon_A2_orp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_xorp:
- ID = Intrinsic::hexagon_A2_xorp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_notp:
- ID = Intrinsic::hexagon_A2_notp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sxtw:
- ID = Intrinsic::hexagon_A2_sxtw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sat:
- ID = Intrinsic::hexagon_A2_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_sath:
- ID = Intrinsic::hexagon_A2_sath; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_satuh:
- ID = Intrinsic::hexagon_A2_satuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_satub:
- ID = Intrinsic::hexagon_A2_satub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_satb:
- ID = Intrinsic::hexagon_A2_satb; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddub:
- ID = Intrinsic::hexagon_A2_vaddub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddubs:
- ID = Intrinsic::hexagon_A2_vaddubs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddh:
- ID = Intrinsic::hexagon_A2_vaddh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddhs:
- ID = Intrinsic::hexagon_A2_vaddhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vadduhs:
- ID = Intrinsic::hexagon_A2_vadduhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddw:
- ID = Intrinsic::hexagon_A2_vaddw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vaddws:
- ID = Intrinsic::hexagon_A2_vaddws; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svavgh:
- ID = Intrinsic::hexagon_A2_svavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svavghs:
- ID = Intrinsic::hexagon_A2_svavghs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svnavgh:
- ID = Intrinsic::hexagon_A2_svnavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svaddh:
- ID = Intrinsic::hexagon_A2_svaddh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svaddhs:
- ID = Intrinsic::hexagon_A2_svaddhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svadduhs:
- ID = Intrinsic::hexagon_A2_svadduhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svsubh:
- ID = Intrinsic::hexagon_A2_svsubh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svsubhs:
- ID = Intrinsic::hexagon_A2_svsubhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_svsubuhs:
- ID = Intrinsic::hexagon_A2_svsubuhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vraddub:
- ID = Intrinsic::hexagon_A2_vraddub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vraddub_acc:
- ID = Intrinsic::hexagon_A2_vraddub_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vradduh:
- ID = Intrinsic::hexagon_M2_vradduh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubub:
- ID = Intrinsic::hexagon_A2_vsubub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsububs:
- ID = Intrinsic::hexagon_A2_vsububs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubh:
- ID = Intrinsic::hexagon_A2_vsubh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubhs:
- ID = Intrinsic::hexagon_A2_vsubhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubuhs:
- ID = Intrinsic::hexagon_A2_vsubuhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubw:
- ID = Intrinsic::hexagon_A2_vsubw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vsubws:
- ID = Intrinsic::hexagon_A2_vsubws; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabsh:
- ID = Intrinsic::hexagon_A2_vabsh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabshsat:
- ID = Intrinsic::hexagon_A2_vabshsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabsw:
- ID = Intrinsic::hexagon_A2_vabsw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vabswsat:
- ID = Intrinsic::hexagon_A2_vabswsat; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffw:
- ID = Intrinsic::hexagon_M2_vabsdiffw; break;
-
- case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffh:
- ID = Intrinsic::hexagon_M2_vabsdiffh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vrsadub:
- ID = Intrinsic::hexagon_A2_vrsadub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vrsadub_acc:
- ID = Intrinsic::hexagon_A2_vrsadub_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgub:
- ID = Intrinsic::hexagon_A2_vavgub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguh:
- ID = Intrinsic::hexagon_A2_vavguh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgh:
- ID = Intrinsic::hexagon_A2_vavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgh:
- ID = Intrinsic::hexagon_A2_vnavgh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgw:
- ID = Intrinsic::hexagon_A2_vavgw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgw:
- ID = Intrinsic::hexagon_A2_vnavgw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgwr:
- ID = Intrinsic::hexagon_A2_vavgwr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgwr:
- ID = Intrinsic::hexagon_A2_vnavgwr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgwcr:
- ID = Intrinsic::hexagon_A2_vavgwcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavgwcr:
- ID = Intrinsic::hexagon_A2_vnavgwcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavghcr:
- ID = Intrinsic::hexagon_A2_vavghcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavghcr:
- ID = Intrinsic::hexagon_A2_vnavghcr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguw:
- ID = Intrinsic::hexagon_A2_vavguw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguwr:
- ID = Intrinsic::hexagon_A2_vavguwr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavgubr:
- ID = Intrinsic::hexagon_A2_vavgubr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavguhr:
- ID = Intrinsic::hexagon_A2_vavguhr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vavghr:
- ID = Intrinsic::hexagon_A2_vavghr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vnavghr:
- ID = Intrinsic::hexagon_A2_vnavghr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminh:
- ID = Intrinsic::hexagon_A2_vminh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxh:
- ID = Intrinsic::hexagon_A2_vmaxh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminub:
- ID = Intrinsic::hexagon_A2_vminub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxub:
- ID = Intrinsic::hexagon_A2_vmaxub; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminuh:
- ID = Intrinsic::hexagon_A2_vminuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxuh:
- ID = Intrinsic::hexagon_A2_vmaxuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminw:
- ID = Intrinsic::hexagon_A2_vminw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxw:
- ID = Intrinsic::hexagon_A2_vmaxw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vminuw:
- ID = Intrinsic::hexagon_A2_vminuw; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_vmaxuw:
- ID = Intrinsic::hexagon_A2_vmaxuw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r:
- ID = Intrinsic::hexagon_S2_asr_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r:
- ID = Intrinsic::hexagon_S2_asl_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r:
- ID = Intrinsic::hexagon_S2_lsr_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r:
- ID = Intrinsic::hexagon_S2_lsl_r_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p:
- ID = Intrinsic::hexagon_S2_asr_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p:
- ID = Intrinsic::hexagon_S2_asl_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p:
- ID = Intrinsic::hexagon_S2_lsr_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p:
- ID = Intrinsic::hexagon_S2_lsl_r_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_acc:
- ID = Intrinsic::hexagon_S2_asr_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_acc:
- ID = Intrinsic::hexagon_S2_asl_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_acc:
- ID = Intrinsic::hexagon_S2_lsr_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_acc:
- ID = Intrinsic::hexagon_S2_lsl_r_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_acc:
- ID = Intrinsic::hexagon_S2_asr_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_acc:
- ID = Intrinsic::hexagon_S2_asl_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_acc:
- ID = Intrinsic::hexagon_S2_lsr_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_acc:
- ID = Intrinsic::hexagon_S2_lsl_r_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_nac:
- ID = Intrinsic::hexagon_S2_asr_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_nac:
- ID = Intrinsic::hexagon_S2_asl_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_nac:
- ID = Intrinsic::hexagon_S2_lsr_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_nac:
- ID = Intrinsic::hexagon_S2_lsl_r_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_nac:
- ID = Intrinsic::hexagon_S2_asr_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_nac:
- ID = Intrinsic::hexagon_S2_asl_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_nac:
- ID = Intrinsic::hexagon_S2_lsr_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_nac:
- ID = Intrinsic::hexagon_S2_lsl_r_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_and:
- ID = Intrinsic::hexagon_S2_asr_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_and:
- ID = Intrinsic::hexagon_S2_asl_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_and:
- ID = Intrinsic::hexagon_S2_lsr_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_and:
- ID = Intrinsic::hexagon_S2_lsl_r_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_or:
- ID = Intrinsic::hexagon_S2_asr_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_or:
- ID = Intrinsic::hexagon_S2_asl_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_or:
- ID = Intrinsic::hexagon_S2_lsr_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_or:
- ID = Intrinsic::hexagon_S2_lsl_r_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_and:
- ID = Intrinsic::hexagon_S2_asr_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_and:
- ID = Intrinsic::hexagon_S2_asl_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_and:
- ID = Intrinsic::hexagon_S2_lsr_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_and:
- ID = Intrinsic::hexagon_S2_lsl_r_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_or:
- ID = Intrinsic::hexagon_S2_asr_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_or:
- ID = Intrinsic::hexagon_S2_asl_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_or:
- ID = Intrinsic::hexagon_S2_lsr_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_or:
- ID = Intrinsic::hexagon_S2_lsl_r_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_sat:
- ID = Intrinsic::hexagon_S2_asr_r_r_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_sat:
- ID = Intrinsic::hexagon_S2_asl_r_r_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r:
- ID = Intrinsic::hexagon_S2_asr_i_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r:
- ID = Intrinsic::hexagon_S2_lsr_i_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r:
- ID = Intrinsic::hexagon_S2_asl_i_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p:
- ID = Intrinsic::hexagon_S2_asr_i_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p:
- ID = Intrinsic::hexagon_S2_lsr_i_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p:
- ID = Intrinsic::hexagon_S2_asl_i_p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc:
- ID = Intrinsic::hexagon_S2_asr_i_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc:
- ID = Intrinsic::hexagon_S2_lsr_i_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc:
- ID = Intrinsic::hexagon_S2_asl_i_r_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc:
- ID = Intrinsic::hexagon_S2_asr_i_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc:
- ID = Intrinsic::hexagon_S2_lsr_i_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc:
- ID = Intrinsic::hexagon_S2_asl_i_p_acc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac:
- ID = Intrinsic::hexagon_S2_asr_i_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac:
- ID = Intrinsic::hexagon_S2_lsr_i_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac:
- ID = Intrinsic::hexagon_S2_asl_i_r_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac:
- ID = Intrinsic::hexagon_S2_asr_i_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac:
- ID = Intrinsic::hexagon_S2_lsr_i_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac:
- ID = Intrinsic::hexagon_S2_asl_i_p_nac; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc:
- ID = Intrinsic::hexagon_S2_lsr_i_r_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc:
- ID = Intrinsic::hexagon_S2_asl_i_r_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc:
- ID = Intrinsic::hexagon_S2_lsr_i_p_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc:
- ID = Intrinsic::hexagon_S2_asl_i_p_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and:
- ID = Intrinsic::hexagon_S2_asr_i_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and:
- ID = Intrinsic::hexagon_S2_lsr_i_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and:
- ID = Intrinsic::hexagon_S2_asl_i_r_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or:
- ID = Intrinsic::hexagon_S2_asr_i_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or:
- ID = Intrinsic::hexagon_S2_lsr_i_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or:
- ID = Intrinsic::hexagon_S2_asl_i_r_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and:
- ID = Intrinsic::hexagon_S2_asr_i_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and:
- ID = Intrinsic::hexagon_S2_lsr_i_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and:
- ID = Intrinsic::hexagon_S2_asl_i_p_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or:
- ID = Intrinsic::hexagon_S2_asr_i_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or:
- ID = Intrinsic::hexagon_S2_lsr_i_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or:
- ID = Intrinsic::hexagon_S2_asl_i_p_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat:
- ID = Intrinsic::hexagon_S2_asl_i_r_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd:
- ID = Intrinsic::hexagon_S2_asr_i_r_rnd; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax:
- ID = Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri:
- ID = Intrinsic::hexagon_S2_addasl_rrri; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_valignib:
- ID = Intrinsic::hexagon_S2_valignib; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_valignrb:
- ID = Intrinsic::hexagon_S2_valignrb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vspliceib:
- ID = Intrinsic::hexagon_S2_vspliceib; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsplicerb:
- ID = Intrinsic::hexagon_S2_vsplicerb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsplatrh:
- ID = Intrinsic::hexagon_S2_vsplatrh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsplatrb:
- ID = Intrinsic::hexagon_S2_vsplatrb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insert:
- ID = Intrinsic::hexagon_S2_insert; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxb_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxh_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxw_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax:
- ID = Intrinsic::hexagon_S2_tableidxd_goodsyntax; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractu:
- ID = Intrinsic::hexagon_S2_extractu; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insertp:
- ID = Intrinsic::hexagon_S2_insertp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractup:
- ID = Intrinsic::hexagon_S2_extractup; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insert_rp:
- ID = Intrinsic::hexagon_S2_insert_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractu_rp:
- ID = Intrinsic::hexagon_S2_extractu_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_insertp_rp:
- ID = Intrinsic::hexagon_S2_insertp_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_extractup_rp:
- ID = Intrinsic::hexagon_S2_extractup_rp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tstbit_i:
- ID = Intrinsic::hexagon_S2_tstbit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_setbit_i:
- ID = Intrinsic::hexagon_S2_setbit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_togglebit_i:
- ID = Intrinsic::hexagon_S2_togglebit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clrbit_i:
- ID = Intrinsic::hexagon_S2_clrbit_i; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_tstbit_r:
- ID = Intrinsic::hexagon_S2_tstbit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_setbit_r:
- ID = Intrinsic::hexagon_S2_setbit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_togglebit_r:
- ID = Intrinsic::hexagon_S2_togglebit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clrbit_r:
- ID = Intrinsic::hexagon_S2_clrbit_r; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh:
- ID = Intrinsic::hexagon_S2_asr_i_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh:
- ID = Intrinsic::hexagon_S2_lsr_i_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh:
- ID = Intrinsic::hexagon_S2_asl_i_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vh:
- ID = Intrinsic::hexagon_S2_asr_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vh:
- ID = Intrinsic::hexagon_S2_asl_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vh:
- ID = Intrinsic::hexagon_S2_lsr_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vh:
- ID = Intrinsic::hexagon_S2_lsl_r_vh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw:
- ID = Intrinsic::hexagon_S2_asr_i_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun:
- ID = Intrinsic::hexagon_S2_asr_i_svw_trun; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_svw_trun:
- ID = Intrinsic::hexagon_S2_asr_r_svw_trun; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw:
- ID = Intrinsic::hexagon_S2_lsr_i_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw:
- ID = Intrinsic::hexagon_S2_asl_i_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vw:
- ID = Intrinsic::hexagon_S2_asr_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vw:
- ID = Intrinsic::hexagon_S2_asl_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vw:
- ID = Intrinsic::hexagon_S2_lsr_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vw:
- ID = Intrinsic::hexagon_S2_lsl_r_vw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwh:
- ID = Intrinsic::hexagon_S2_vrndpackwh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwhs:
- ID = Intrinsic::hexagon_S2_vrndpackwhs; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsxtbh:
- ID = Intrinsic::hexagon_S2_vsxtbh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vzxtbh:
- ID = Intrinsic::hexagon_S2_vzxtbh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathub:
- ID = Intrinsic::hexagon_S2_vsathub; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_svsathub:
- ID = Intrinsic::hexagon_S2_svsathub; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_svsathb:
- ID = Intrinsic::hexagon_S2_svsathb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathb:
- ID = Intrinsic::hexagon_S2_vsathb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunohb:
- ID = Intrinsic::hexagon_S2_vtrunohb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunewh:
- ID = Intrinsic::hexagon_S2_vtrunewh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunowh:
- ID = Intrinsic::hexagon_S2_vtrunowh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vtrunehb:
- ID = Intrinsic::hexagon_S2_vtrunehb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsxthw:
- ID = Intrinsic::hexagon_S2_vsxthw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vzxthw:
- ID = Intrinsic::hexagon_S2_vzxthw; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwh:
- ID = Intrinsic::hexagon_S2_vsatwh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh:
- ID = Intrinsic::hexagon_S2_vsatwuh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_packhl:
- ID = Intrinsic::hexagon_S2_packhl; break;
-
- case Hexagon::BI__builtin_HEXAGON_A2_swiz:
- ID = Intrinsic::hexagon_A2_swiz; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathub_nopack:
- ID = Intrinsic::hexagon_S2_vsathub_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsathb_nopack:
- ID = Intrinsic::hexagon_S2_vsathb_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwh_nopack:
- ID = Intrinsic::hexagon_S2_vsatwh_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh_nopack:
- ID = Intrinsic::hexagon_S2_vsatwuh_nopack; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffob:
- ID = Intrinsic::hexagon_S2_shuffob; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffeb:
- ID = Intrinsic::hexagon_S2_shuffeb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffoh:
- ID = Intrinsic::hexagon_S2_shuffoh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_shuffeh:
- ID = Intrinsic::hexagon_S2_shuffeh; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_parityp:
- ID = Intrinsic::hexagon_S2_parityp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_lfsp:
- ID = Intrinsic::hexagon_S2_lfsp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clbnorm:
- ID = Intrinsic::hexagon_S2_clbnorm; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clb:
- ID = Intrinsic::hexagon_S2_clb; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl0:
- ID = Intrinsic::hexagon_S2_cl0; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl1:
- ID = Intrinsic::hexagon_S2_cl1; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_clbp:
- ID = Intrinsic::hexagon_S2_clbp; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl0p:
- ID = Intrinsic::hexagon_S2_cl0p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_cl1p:
- ID = Intrinsic::hexagon_S2_cl1p; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_brev:
- ID = Intrinsic::hexagon_S2_brev; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_ct0:
- ID = Intrinsic::hexagon_S2_ct0; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_ct1:
- ID = Intrinsic::hexagon_S2_ct1; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_interleave:
- ID = Intrinsic::hexagon_S2_interleave; break;
-
- case Hexagon::BI__builtin_HEXAGON_S2_deinterleave:
- ID = Intrinsic::hexagon_S2_deinterleave; break;
-
- case Hexagon::BI__builtin_SI_to_SXTHI_asrh:
- ID = Intrinsic::hexagon_SI_to_SXTHI_asrh; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_orn:
- ID = Intrinsic::hexagon_A4_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_andn:
- ID = Intrinsic::hexagon_A4_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_ornp:
- ID = Intrinsic::hexagon_A4_ornp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_andnp:
- ID = Intrinsic::hexagon_A4_andnp; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_combineir:
- ID = Intrinsic::hexagon_A4_combineir; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_combineri:
- ID = Intrinsic::hexagon_A4_combineri; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmpneqi:
- ID = Intrinsic::hexagon_C4_cmpneqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmpneq:
- ID = Intrinsic::hexagon_C4_cmpneq; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmpltei:
- ID = Intrinsic::hexagon_C4_cmpltei; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmplte:
- ID = Intrinsic::hexagon_C4_cmplte; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmplteui:
- ID = Intrinsic::hexagon_C4_cmplteui; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_cmplteu:
- ID = Intrinsic::hexagon_C4_cmplteu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpneq:
- ID = Intrinsic::hexagon_A4_rcmpneq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpneqi:
- ID = Intrinsic::hexagon_A4_rcmpneqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpeq:
- ID = Intrinsic::hexagon_A4_rcmpeq; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_rcmpeqi:
- ID = Intrinsic::hexagon_A4_rcmpeqi; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9:
- ID = Intrinsic::hexagon_C4_fastcorner9; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9_not:
- ID = Intrinsic::hexagon_C4_fastcorner9_not; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_andn:
- ID = Intrinsic::hexagon_C4_and_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_and:
- ID = Intrinsic::hexagon_C4_and_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_orn:
- ID = Intrinsic::hexagon_C4_and_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_and_or:
- ID = Intrinsic::hexagon_C4_and_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_andn:
- ID = Intrinsic::hexagon_C4_or_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_and:
- ID = Intrinsic::hexagon_C4_or_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_orn:
- ID = Intrinsic::hexagon_C4_or_orn; break;
-
- case Hexagon::BI__builtin_HEXAGON_C4_or_or:
- ID = Intrinsic::hexagon_C4_or_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_addaddi:
- ID = Intrinsic::hexagon_S4_addaddi; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_subaddi:
- ID = Intrinsic::hexagon_S4_subaddi; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_xacc:
- ID = Intrinsic::hexagon_M4_xor_xacc; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_and:
- ID = Intrinsic::hexagon_M4_and_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_or:
- ID = Intrinsic::hexagon_M4_and_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_xor:
- ID = Intrinsic::hexagon_M4_and_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_and_andn:
- ID = Intrinsic::hexagon_M4_and_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_and:
- ID = Intrinsic::hexagon_M4_xor_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_or:
- ID = Intrinsic::hexagon_M4_xor_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_xor_andn:
- ID = Intrinsic::hexagon_M4_xor_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_and:
- ID = Intrinsic::hexagon_M4_or_and; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_or:
- ID = Intrinsic::hexagon_M4_or_or; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_xor:
- ID = Intrinsic::hexagon_M4_or_xor; break;
-
- case Hexagon::BI__builtin_HEXAGON_M4_or_andn:
- ID = Intrinsic::hexagon_M4_or_andn; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_or_andix:
- ID = Intrinsic::hexagon_S4_or_andix; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_or_andi:
- ID = Intrinsic::hexagon_S4_or_andi; break;
-
- case Hexagon::BI__builtin_HEXAGON_S4_or_ori:
- ID = Intrinsic::hexagon_S4_or_ori; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_modwrapu:
- ID = Intrinsic::hexagon_A4_modwrapu; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_cround_rr:
- ID = Intrinsic::hexagon_A4_cround_rr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_ri:
- ID = Intrinsic::hexagon_A4_round_ri; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_rr:
- ID = Intrinsic::hexagon_A4_round_rr; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat:
- ID = Intrinsic::hexagon_A4_round_ri_sat; break;
-
- case Hexagon::BI__builtin_HEXAGON_A4_round_rr_sat:
- ID = Intrinsic::hexagon_A4_round_rr_sat; break;
+ case X86::BI__builtin_ia32_rdrand16_step:
+ case X86::BI__builtin_ia32_rdrand32_step:
+ case X86::BI__builtin_ia32_rdrand64_step: {
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_rdrand16_step:
+ ID = Intrinsic::x86_rdrand_16;
+ break;
+ case X86::BI__builtin_ia32_rdrand32_step:
+ ID = Intrinsic::x86_rdrand_32;
+ break;
+ case X86::BI__builtin_ia32_rdrand64_step:
+ ID = Intrinsic::x86_rdrand_64;
+ break;
+ }
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
+ Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
+ return Builder.CreateExtractValue(Call, 1);
+ }
}
-
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, "");
}
+
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
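
For context: the rdrand cases added above lower each __builtin_ia32_rdrand*_step
builtin onto the matching llvm.x86.rdrand.{16,32,64} intrinsic, store the
extracted random value through the pointer operand, and return the carry flag
as the builtin's result. A minimal caller-side sketch of the builtin being
lowered (illustrative usage only, not part of the patch; requires -mrdrnd):

    // Retry until the hardware RNG signals success via the carry flag.
    unsigned int hw_random(void) {
      unsigned int value;
      while (!__builtin_ia32_rdrand32_step(&value))
        ;  // a zero return means no random value was available yet
      return value;
    }
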
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 7c08650..003fef5 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -53,7 +53,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// destructor separately.
for (CXXRecordDecl::field_iterator I = Class->field_begin(),
E = Class->field_end(); I != E; ++I)
- if ((*I)->getType().isDestructedType())
+ if (I->getType().isDestructedType())
return true;
// Try to find a unique base class with a non-trivial destructor.
@@ -91,7 +91,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
- if (ClassLayout.getBaseClassOffsetInBits(UniqueBase) != 0)
+ if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
return true;
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index befebbe..aba5d75 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -23,7 +23,7 @@ static void ErrorUnsupportedABI(CodeGenFunction &CGF,
StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot yet compile %1 in this ABI");
+ "cannot yet compile %0 in this ABI");
Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
DiagID)
<< S;
@@ -145,6 +145,13 @@ void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
}
CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!requiresArrayCookie(expr))
+ return CharUnits::Zero();
+ return getArrayCookieSizeImpl(expr->getAllocatedType());
+}
+
+CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+  // BOGUS: an ABI that can actually require an array cookie is expected
+  // to override this.
return CharUnits::Zero();
}
@@ -158,16 +165,53 @@ llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
return 0;
}
-void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr, QualType ElementType,
- llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize) {
- ErrorUnsupportedABI(CGF, "array cookie reading");
+bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
- // This should be enough to avoid assertions.
- NumElements = 0;
- AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
- CookieSize = CharUnits::Zero();
+ return elementType.isDestructedType();
+}
+
+bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ return expr->getAllocatedType().isDestructedType();
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
+ const CXXDeleteExpr *expr, QualType eltTy,
+ llvm::Value *&numElements,
+ llvm::Value *&allocPtr, CharUnits &cookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(ptr->getType())->getAddressSpace();
+ llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
+ ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
+
+ // If we don't need an array cookie, bail out early.
+ if (!requiresArrayCookie(expr, eltTy)) {
+ allocPtr = ptr;
+ numElements = 0;
+ cookieSize = CharUnits::Zero();
+ return;
+ }
+
+ cookieSize = getArrayCookieSizeImpl(eltTy);
+ allocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ptr,
+ -cookieSize.getQuantity());
+ numElements = readArrayCookieImpl(CGF, allocPtr, cookieSize);
+}
+
+llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ CharUnits cookieSize) {
+ ErrorUnsupportedABI(CGF, "reading a new[] cookie");
+ return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
@@ -177,6 +221,13 @@ void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
ErrorUnsupportedABI(CGF, "static local variable initialization");
}
+void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // The default behavior is to use atexit.
+ CGF.registerGlobalDtorWithAtExit(dtor, addr);
+}
+
/// Returns the adjustment, in bytes, required for the given
/// member-pointer operation. Returns null if no adjustment is
/// required.
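
For context: the rewritten ReadArrayCookie above performs the generic pointer
arithmetic once (bitcast to char*, then step back over the cookie to recover
the pointer the allocator originally returned) and delegates decoding of the
element count to the ABI via readArrayCookieImpl. A rough source-level sketch
of the emitted logic, with a hypothetical helper name, assuming an
Itanium-style cookie that stores the count in the size_t immediately
preceding the array data:

    #include <cstddef>
    #include <cstring>

    // Illustrative only: what the generated code conceptually does for
    // `delete[] p` when an array cookie is present.
    void read_array_cookie(char *p, std::size_t cookieSize,
                           char *&allocPtr, std::size_t &numElements) {
      allocPtr = p - cookieSize;  // start of the underlying allocation
      std::memcpy(&numElements, p - sizeof numElements, sizeof numElements);
    }
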
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 4e045f5..a0dcdfd 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -38,7 +38,7 @@ namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
-/// Implements C++ ABI-specific code generation functions.
+/// \brief Implements C++ ABI-specific code generation functions.
class CGCXXABI {
protected:
CodeGenModule &CGM;
@@ -71,6 +71,9 @@ protected:
ASTContext &getContext() const { return CGM.getContext(); }
+ virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
+ virtual bool requiresArrayCookie(const CXXNewExpr *E);
+
public:
virtual ~CGCXXABI();
@@ -190,18 +193,20 @@ public:
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
+ /// Gets the pure virtual member call function.
+ virtual StringRef GetPureVirtualCallName() = 0;
+
/**************************** Array cookies ******************************/
/// Returns the extra size required in order to store the array
- /// cookie for the given type. May return 0 to indicate that no
+ /// cookie for the given new-expression. May return 0 to indicate that no
/// array cookie is required.
///
/// Several cases are filtered out before this method is called:
/// - non-array allocations never need a cookie
- /// - calls to ::operator new(size_t, void*) never need a cookie
+ /// - calls to \::operator new(size_t, void*) never need a cookie
///
- /// \param ElementType - the allocated type of the expression,
- /// i.e. the pointee type of the expression result type
+ /// \param expr - the new-expression being allocated.
virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
/// Initialize the array cookie for the given allocation.
@@ -209,7 +214,8 @@ public:
/// \param NewPtr - a char* which is the presumed-non-null
/// return value of the allocation function
/// \param NumElements - the computed number of elements,
- /// potentially collapsed from the multidimensional array case
+ /// potentially collapsed from the multidimensional array case;
+ /// always a size_t
/// \param ElementType - the base element allocated type,
/// i.e. the allocated type after stripping all array types
virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
@@ -236,6 +242,27 @@ public:
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+protected:
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. Assumes that an array cookie is
+ /// required.
+ virtual CharUnits getArrayCookieSizeImpl(QualType elementType);
+
+ /// Reads the array cookie for an allocation which is known to have one.
+ /// This is called by the standard implementation of ReadArrayCookie.
+ ///
+ /// \param ptr - a pointer to the allocation made for an array, as a char*
+ /// \param cookieSize - the computed cookie size of an array
+ ///
+ /// Other parameters are as above.
+ ///
+ /// \return a size_t
+  virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *ptr,
+ CharUnits cookieSize);
+
+public:
+
/*************************** Static local guards ****************************/
/// Emits the guarded initializer and destructor setup for the given
@@ -249,6 +276,18 @@ public:
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr, bool PerformInit);
+ /// Emit code to force the execution of a destructor during global
+ /// teardown. The default implementation of this uses atexit.
+ ///
+ /// \param dtor - a function taking a single pointer argument
+ /// \param addr - a pointer to pass to the destructor function.
+ virtual void registerGlobalDtor(CodeGenFunction &CGF, llvm::Constant *dtor,
+ llvm::Constant *addr);
+
+ /***************************** Virtual Tables *******************************/
+
+ /// Generates and emits the virtual tables for a class.
+ virtual void EmitVTables(const CXXRecordDecl *Class) = 0;
};
/// Creates an instance of a C++ ABI class.
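
For context: the new registerGlobalDtor hook defaults to atexit-based
teardown, which ABI subclasses may override (for instance, to prefer
__cxa_atexit where available). A sketch of what the fallback conceptually
emits for a global with a non-trivial destructor; this is compiler-generated
code, not something to write by hand:

    #include <cstdlib>

    struct Widget { ~Widget() {} };
    static Widget g;                              // needs teardown at exit
    static void g_dtor() { g.~Widget(); }         // compiler-emitted stub
    static int g_reg = (std::atexit(g_dtor), 0);  // registered during init
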
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 82ee4fc..7d2b9d3 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -67,39 +67,68 @@ static CanQualType GetReturnType(QualType RetTy) {
return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
-/// Arrange the argument and result information for a value of the
-/// given unprototyped function type.
+/// Arrange the argument and result information for a value of the given
+/// unprototyped freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
- return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
- ArrayRef<CanQualType>(),
- FTNP->getExtInfo(),
- RequiredArgs(0));
+ return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
}
-/// Arrange the argument and result information for a value of the
-/// given function type, on top of any implicit parameters already
-/// stored.
-static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &argTypes,
- CanQual<FunctionProtoType> FTP) {
- RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+/// Arrange the LLVM function layout for a value of the given function
+/// type, on top of any implicit parameters already stored. Use the
+/// given ExtInfo instead of the ExtInfo from the function type.
+static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP,
+ FunctionType::ExtInfo extInfo) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- argTypes.push_back(FTP->getArgType(i));
+ prefix.push_back(FTP->getArgType(i));
CanQualType resultType = FTP->getResultType().getUnqualifiedType();
- return CGT.arrangeFunctionType(resultType, argTypes,
- FTP->getExtInfo(), required);
+ return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
+}
+
+/// Arrange the argument and result information for a free function (i.e.
+/// not a C++ or ObjC instance method) of the given type.
+static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
+ return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
+}
+
+/// Given the formal ext-info of a C++ instance method, adjust it
+/// according to the C++ ABI in effect.
+static void adjustCXXMethodInfo(CodeGenTypes &CGT,
+ FunctionType::ExtInfo &extInfo,
+ bool isVariadic) {
+ if (extInfo.getCC() == CC_Default) {
+ CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
+ extInfo = extInfo.withCallingConv(CC);
+ }
+}
+
+/// Arrange the argument and result information for a C++ instance method
+/// of the given type, adjusting its ext-info per the C++ ABI in effect.
+static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
+ return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}
/// Arrange the argument and result information for a value of the
-/// given function type.
+/// given freestanding function type.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
- return ::arrangeFunctionType(*this, argTypes, FTP);
+ return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -134,7 +163,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
// Add the 'this' pointer.
argTypes.push_back(GetThisType(Context, RD));
- return ::arrangeFunctionType(*this, argTypes,
+ return ::arrangeCXXMethodType(*this, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
@@ -154,7 +183,7 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
}
- return arrangeFunctionType(prototype);
+ return arrangeFreeFunctionType(prototype);
}
/// Arrange the argument and result information for a declaration
@@ -176,7 +205,9 @@ CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
argTypes.push_back(FTP->getArgType(i));
- return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
+ return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}
/// Arrange the argument and result information for a declaration,
@@ -193,9 +224,12 @@ CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
CanQual<FunctionProtoType> FTP = GetFormalType(D);
assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+  assert(FTP->isVariadic() == 0 && "dtor with variadic prototype");
- return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
- RequiredArgs::All);
+ FunctionType::ExtInfo extInfo = FTP->getExtInfo();
+ adjustCXXMethodInfo(*this, extInfo, false);
+ return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
+ RequiredArgs::All);
}
/// Arrange the argument and result information for the declaration or
@@ -214,14 +248,14 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
- return arrangeFunctionType(noProto->getResultType(),
- ArrayRef<CanQualType>(),
- noProto->getExtInfo(),
- RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
- return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+ return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for the declaration or
@@ -261,8 +295,8 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
- return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
- einfo, required);
+ return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
}
const CGFunctionInfo &
@@ -284,8 +318,8 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
- const FunctionType *fnType) {
+CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
RequiredArgs required = RequiredArgs::All;
if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
if (proto->isVariadic())
@@ -295,22 +329,39 @@ CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
required = RequiredArgs(0);
}
- return arrangeFunctionCall(fnType->getResultType(), args,
- fnType->getExtInfo(), required);
+ return arrangeFreeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ FunctionType::ExtInfo info,
+ RequiredArgs required) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
+ i != e; ++i)
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
+ required);
}
+/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionCall(QualType resultType,
- const CallArgList &args,
- const FunctionType::ExtInfo &info,
- RequiredArgs required) {
+CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
+ const FunctionProtoType *FPT,
+ RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
for (CallArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
argTypes.push_back(Context.getCanonicalParamType(i->Ty));
- return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
- required);
+
+ FunctionType::ExtInfo info = FPT->getExtInfo();
+ adjustCXXMethodInfo(*this, info, FPT->isVariadic());
+ return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
+ argTypes, info, required);
}
const CGFunctionInfo &
@@ -326,23 +377,23 @@ CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
RequiredArgs required =
(isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
- return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
- required);
+ return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
+ required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
- FunctionType::ExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
-CodeGenTypes::arrangeFunctionType(CanQualType resultType,
- ArrayRef<CanQualType> argTypes,
- const FunctionType::ExtInfo &info,
- RequiredArgs required) {
+CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ RequiredArgs required) {
#ifndef NDEBUG
for (ArrayRef<CanQualType>::const_iterator
I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
@@ -445,10 +496,9 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
} else {
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- const FieldDecl *FD = *i;
- assert(!FD->isBitField() &&
+ assert(!i->isBitField() &&
"Cannot expand structure with bit-field members.");
- GetExpandedTypes(FD->getType(), expandedTypes);
+ GetExpandedTypes(i->getType(), expandedTypes);
}
}
} else if (const ComplexType *CT = type->getAs<ComplexType>()) {
@@ -933,14 +983,18 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Indirect:
- PAL.push_back(llvm::AttributeWithIndex::get(Index,
- llvm::Attribute::StructRet));
+ case ABIArgInfo::Indirect: {
+ llvm::Attributes SRETAttrs = llvm::Attribute::StructRet;
+ if (RetAI.getInReg())
+ SRETAttrs |= llvm::Attribute::InReg;
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, SRETAttrs));
+
++Index;
// sret disables readnone and readonly
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
llvm::Attribute::ReadNone);
break;
+ }
case ABIArgInfo::Expand:
llvm_unreachable("Invalid ABI kind for return argument");
@@ -949,14 +1003,6 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (RetAttrs)
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
- // FIXME: RegParm should be reduced in case of global register variable.
- signed RegParm;
- if (FI.getHasRegParm())
- RegParm = FI.getRegParm();
- else
- RegParm = CodeGenOpts.NumRegisterParameters;
-
- unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
@@ -974,22 +1020,22 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attrs |= llvm::Attribute::ZExt;
// FALL THROUGH
case ABIArgInfo::Direct:
- if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType() ||
- ParamType->isReferenceType())) {
- RegParm -=
- (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
- if (RegParm >= 0)
+ if (AI.getInReg())
Attrs |= llvm::Attribute::InReg;
- }
+
// FIXME: handle sseregparm someday...
// Increment Index if there is padding.
Index += (AI.getPaddingType() != 0);
if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType()))
- Index += STy->getNumElements()-1; // 1 will be added below.
+ dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
+ unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
+ if (Attrs != llvm::Attribute::None)
+ for (unsigned I = 0; I < Extra; ++I)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index + I, Attrs));
+ Index += Extra;
+ }
break;
case ABIArgInfo::Indirect:
@@ -1355,7 +1401,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::Value *result) {
// This is only applicable to a method with an immutable 'self'.
- const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ const ObjCMethodDecl *method =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
if (!method) return 0;
const VarDecl *self = method->getSelfDecl();
if (!self->getType().isConstQualified()) return 0;
@@ -2066,8 +2113,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
- llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
- AttributeList.end());
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);
llvm::BasicBlock *InvokeDest = 0;
if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
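
For context: adjustCXXMethodInfo above exists because the default calling
convention for C++ instance methods can differ from CC_Default on some
targets (for example, thiscall for non-variadic methods on 32-bit MSVC), and
variadic methods must be exempt, since thiscall cannot pass variable
arguments. Illustrative effect under that assumed target behavior:

    struct S {
      void f(int);       // non-variadic: may be adjusted to thiscall
      void g(int, ...);  // variadic: exempt, keeps the default convention
    };
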
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 2aedf95..e37fa3a 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -105,30 +105,28 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
}
static llvm::Value *
-ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
- CharUnits NonVirtual, llvm::Value *Virtual) {
- llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
- llvm::Value *NonVirtualOffset = 0;
- if (!NonVirtual.isZero())
- NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy,
- NonVirtual.getQuantity());
-
- llvm::Value *BaseOffset;
- if (Virtual) {
- if (NonVirtualOffset)
- BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
- else
- BaseOffset = Virtual;
- } else
- BaseOffset = NonVirtualOffset;
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
+ CharUnits nonVirtualOffset,
+ llvm::Value *virtualOffset) {
+ // Assert that we have something to do.
+ assert(!nonVirtualOffset.isZero() || virtualOffset != 0);
+
+ // Compute the offset from the static and dynamic components.
+ llvm::Value *baseOffset;
+ if (!nonVirtualOffset.isZero()) {
+ baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
+ nonVirtualOffset.getQuantity());
+ if (virtualOffset) {
+ baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
+ }
+ } else {
+ baseOffset = virtualOffset;
+ }
// Apply the base offset.
- ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
- ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
-
- return ThisPtr;
+ ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
+ ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
+ return ptr;
}
llvm::Value *
@@ -142,72 +140,81 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
CastExpr::path_const_iterator Start = PathBegin;
const CXXRecordDecl *VBase = 0;
- // Get the virtual base.
+ // Sema has done some convenient canonicalization here: if the
+ // access path involved any virtual steps, the conversion path will
+ // *start* with a step down to the correct virtual base subobject,
+ // and hence will not require any further steps.
if ((*Start)->isVirtual()) {
VBase =
cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
++Start;
}
-
+
+ // Compute the static offset of the ultimate destination within its
+ // allocating subobject (the virtual base, if there is one, or else
+ // the "complete" object that we see).
CharUnits NonVirtualOffset =
ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
Start, PathEnd);
+ // If there's a virtual step, we can sometimes "devirtualize" it.
+ // For now, that's limited to when the derived type is final.
+ // TODO: "devirtualize" this for accesses to known-complete objects.
+ if (VBase && Derived->hasAttr<FinalAttr>()) {
+ const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
+ CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
+ NonVirtualOffset += vBaseOffset;
+ VBase = 0; // we no longer have a virtual step
+ }
+
// Get the base pointer type.
llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
-
+
+ // If the static offset is zero and we don't have a virtual step,
+ // just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
- // Just cast back.
return Builder.CreateBitCast(Value, BasePtrTy);
}
+
+ llvm::BasicBlock *origBB = 0;
+ llvm::BasicBlock *endBB = 0;
- llvm::BasicBlock *CastNull = 0;
- llvm::BasicBlock *CastNotNull = 0;
- llvm::BasicBlock *CastEnd = 0;
-
+ // Skip over the offset (and the vtable load) if we're supposed to
+ // null-check the pointer.
if (NullCheckValue) {
- CastNull = createBasicBlock("cast.null");
- CastNotNull = createBasicBlock("cast.notnull");
- CastEnd = createBasicBlock("cast.end");
+ origBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
+ endBB = createBasicBlock("cast.end");
- llvm::Value *IsNull = Builder.CreateIsNull(Value);
- Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
- EmitBlock(CastNotNull);
+ llvm::Value *isNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(isNull, endBB, notNullBB);
+ EmitBlock(notNullBB);
}
+ // Compute the virtual offset.
llvm::Value *VirtualOffset = 0;
-
if (VBase) {
- if (Derived->hasAttr<FinalAttr>()) {
- VirtualOffset = 0;
-
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
-
- CharUnits VBaseOffset = Layout.getVBaseClassOffset(VBase);
- NonVirtualOffset += VBaseOffset;
- } else
- VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
}
- // Apply the offsets.
+ // Apply both offsets.
Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
NonVirtualOffset,
VirtualOffset);
- // Cast back.
+ // Cast to the destination type.
Value = Builder.CreateBitCast(Value, BasePtrTy);
-
+
+ // Build a phi if we needed a null check.
if (NullCheckValue) {
- Builder.CreateBr(CastEnd);
- EmitBlock(CastNull);
- Builder.CreateBr(CastEnd);
- EmitBlock(CastEnd);
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ Builder.CreateBr(endBB);
+ EmitBlock(endBB);
- llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
- PHI->addIncoming(Value, CastNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
- CastNull);
+ llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
+ PHI->addIncoming(Value, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
Value = PHI;
}
@@ -556,16 +563,19 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
llvm::Value *ThisPtr = CGF.LoadCXXThis();
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- LValue LHS;
+ LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
- // If we are initializing an anonymous union field, drill down to the field.
if (MemberInit->isIndirectMemberInitializer()) {
- LHS = CGF.EmitLValueForAnonRecordField(ThisPtr,
- MemberInit->getIndirectMember(), 0);
+ // If we are initializing an anonymous union field, drill down to
+ // the field.
+ IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
+ IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
+ IEnd = IndirectField->chain_end();
+ for ( ; I != IEnd; ++I)
+ LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
} else {
- LValue ThisLHSLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
- LHS = CGF.EmitLValueForFieldInitialization(ThisLHSLV, Field);
+ LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
}
// Special case: if we are in a copy or move constructor, and we are copying
@@ -717,7 +727,8 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Before we go any further, try the complete->base constructor
// delegation optimization.
- if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
+ CGM.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, Ctor->getLocEnd());
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
@@ -916,7 +927,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// Enter the cleanup scopes for virtual bases.
EnterDtorCleanups(Dtor, Dtor_Complete);
- if (!isTryBody) {
+  if (!isTryBody &&
+      CGM.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
LoadCXXThis());
break;
@@ -1226,7 +1237,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CallExpr::const_arg_iterator ArgEnd) {
CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
+ if (DI &&
+ CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
// If debug info for this class has not been emitted then this is the
// right time to do so.
const CXXRecordDecl *Parent = D->getParent();
@@ -1308,8 +1320,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArg(Args, *Arg, ArgType);
}
- EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
- ReturnValueSlot(), Args, D);
+ EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
+ Callee, ReturnValueSlot(), Args, D);
}
void
@@ -1742,38 +1754,42 @@ CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
return CGM.GetAddrOfFunction(MD, fnType);
}
-void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
- CallArgList &CallArgs) {
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
+ CallArgList &callArgs) {
// Lookup the call operator
- DeclarationName Name
+ DeclarationName operatorName
= getContext().DeclarationNames.getCXXOperatorName(OO_Call);
- DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
- CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
- const FunctionProtoType *FPT =
- CallOperator->getType()->getAs<FunctionProtoType>();
- QualType ResultType = FPT->getResultType();
+ CXXMethodDecl *callOperator =
+ cast<CXXMethodDecl>(*lambda->lookup(operatorName).first);
// Get the address of the call operator.
- GlobalDecl GD(CallOperator);
- const CGFunctionInfo &CalleeFnInfo =
- CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
- RequiredArgs::forPrototypePlus(FPT, 1));
- llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
-
- // Determine whether we have a return value slot to use.
- ReturnValueSlot Slot;
- if (!ResultType->isVoidType() &&
- CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- hasAggregateLLVMType(CurFnInfo->getReturnType()))
- Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+ const CGFunctionInfo &calleeFnInfo =
+ CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
+ llvm::Value *callee =
+ CGM.GetAddrOfFunction(GlobalDecl(callOperator),
+ CGM.getTypes().GetFunctionType(calleeFnInfo));
+
+ // Prepare the return slot.
+ const FunctionProtoType *FPT =
+ callOperator->getType()->castAs<FunctionProtoType>();
+ QualType resultType = FPT->getResultType();
+ ReturnValueSlot returnSlot;
+ if (!resultType->isVoidType() &&
+ calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(calleeFnInfo.getReturnType()))
+ returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
+
+ // We don't need to separately arrange the call arguments because
+ // the call can't be variadic anyway --- it's impossible to forward
+ // variadic arguments.
// Now emit our call.
- RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+ RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
+ callArgs, callOperator);
- // Forward the returned value
- if (!ResultType->isVoidType() && Slot.isNull())
- EmitReturnOfRValue(RV, ResultType);
+ // If necessary, copy the returned value into the slot.
+ if (!resultType->isVoidType() && returnSlot.isNull())
+ EmitReturnOfRValue(RV, resultType);
}
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
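Among other things, the rewritten EmitForwardingCallToLambda backs the conversion of a captureless lambda to a function pointer; the static invoker it emits is exactly a forwarding call to the lambda's call operator. A minimal sketch of the source-level behavior (illustrative only):

    int main() {
      auto lam = [](int x) { return x + 1; };
      int (*fp)(int) = lam; // fp is the static invoker; its body forwards
                            // every argument to lam's operator()(int)
      return fp(41) == 42 ? 0 : 1;
    }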
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index b00e2a2..f9ea7e0 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -831,8 +831,12 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EmitBlock(EHEntry);
- cleanupFlags.setIsForEHCleanup();
- EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+ // We only actually emit the cleanup code if the cleanup is either
+ // active or was used before it was deactivated.
+ if (EHActiveFlag || IsActive) {
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+ }
Builder.CreateBr(getEHDispatchBlock(EHParent));
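The new guard matters for cleanups that are pushed but may never become active, such as the destructor of a temporary created on only one branch of a short-circuit. A minimal sketch of such a cleanup (illustrative only):

    struct T {
      ~T() {}                      // this destructor is the cleanup at issue
      bool ok() const { return true; }
    };

    bool f(bool b) {
      // T()'s destructor cleanup starts deactivated and is activated only if
      // the right-hand side runs; if it never does, there is nothing for the
      // EH path to emit.
      return b && T().ok();
    }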
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 7726e44..d8dbe41 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -131,7 +131,7 @@ public:
/// A scope which attempts to handle some, possibly all, types of
/// exceptions.
///
-/// Objective C @finally blocks are represented using a cleanup scope
+/// Objective C \@finally blocks are represented using a cleanup scope
/// after the catch scope.
class EHCatchScope : public EHScope {
// In effect, we have a flexible array member
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index d286d24..00127ac 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -320,7 +320,7 @@ void CGDebugInfo::CreateCompileUnit() {
// Figure out which version of the ObjC runtime we have.
unsigned RuntimeVers = 0;
if (LO.ObjC1)
- RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+ RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
// Create new compile unit.
DBuilder.createCompileUnit(
@@ -335,7 +335,7 @@ void CGDebugInfo::CreateCompileUnit() {
/// one if necessary.
llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
- const char *BTName = NULL;
+ StringRef BTName;
switch (BT->getKind()) {
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
@@ -350,8 +350,8 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
return llvm::DIType();
case BuiltinType::ObjCClass:
return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", getOrCreateMainFile(),
- 0);
+ "objc_class", TheCU,
+ getOrCreateMainFile(), 0);
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
@@ -361,8 +361,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
// TODO: Cache these two types to avoid duplicates.
llvm::DIType OCTy =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", getOrCreateMainFile(),
- 0);
+ "objc_class", TheCU, getOrCreateMainFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
@@ -382,7 +381,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::ObjCSel: {
return
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_selector", getOrCreateMainFile(),
+ "objc_selector", TheCU, getOrCreateMainFile(),
0);
}
case BuiltinType::UChar:
@@ -514,7 +513,7 @@ llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
llvm_unreachable("Unknown RecordDecl type!");
// Create the type.
- return DBuilder.createForwardDecl(Tag, RDName, DefUnit, Line);
+ return DBuilder.createForwardDecl(Tag, RDName, Ctx, DefUnit, Line);
}
// Walk up the context chain and create forward decls for record decls,
@@ -547,7 +546,7 @@ llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
/// then emit the record's forward decl if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
- if (!CGM.getCodeGenOpts().LimitDebugInfo)
+ if (CGM.getCodeGenOpts().DebugInfo != CodeGenOptions::LimitedDebugInfo)
return getOrCreateType(PointeeTy, Unit);
// Limit debug info for the pointee type.
@@ -577,8 +576,10 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
- if (Tag == llvm::dwarf::DW_TAG_reference_type)
- return DBuilder.createReferenceType(CreatePointeeType(PointeeTy, Unit));
+ if (Tag == llvm::dwarf::DW_TAG_reference_type ||
+ Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
+ return DBuilder.createReferenceType(Tag,
+ CreatePointeeType(PointeeTy, Unit));
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
@@ -683,15 +684,13 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
// FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
if (isa<FunctionNoProtoType>(Ty))
EltTys.push_back(DBuilder.createUnspecifiedParameter());
- else if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
- for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ else if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FPT->getArgType(i), Unit));
}
llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
-
- llvm::DIType DbgTy = DBuilder.createSubroutineType(Unit, EltTypeArray);
- return DbgTy;
+ return DBuilder.createSubroutineType(Unit, EltTypeArray);
}
@@ -765,7 +764,7 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
- // For C++11 Lambdas a Fields will be the same as a Capture, but the Capture
+ // For C++11 Lambdas a Field will be the same as a Capture, but the Capture
// has the name and the location of the variable so we should iterate over
// both concurrently.
if (CXXDecl && CXXDecl->isLambda()) {
@@ -912,7 +911,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
StringRef MethodName = getFunctionName(Method);
llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
-
+
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
StringRef MethodLinkageName;
@@ -992,15 +991,17 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
if (D->isImplicit() && !D->isUsed())
continue;
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+      // Only emit debug information for user-provided functions; we're
+      // unlikely to want info for artificial functions.
+ if (Method->isUserProvided())
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ }
else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
- SE = FTD->spec_end(); SI != SE; ++SI) {
- FunctionDecl *FD = *SI;
- if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(FD))
- EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
- }
+ SE = FTD->spec_end(); SI != SE; ++SI)
+ EltTys.push_back(CreateCXXMemberFunction(cast<CXXMethodDecl>(*SI), Unit,
+ RecordTy));
}
}
@@ -1047,7 +1048,7 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
.getVirtualBaseOffsetOffset(RD, Base).getQuantity();
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
- BaseOffset = RL.getBaseClassOffsetInBits(Base);
+ BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
@@ -1083,7 +1084,7 @@ CollectTemplateParams(const TemplateParameterList *TPList,
llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
llvm::DITemplateValueParameter TVP =
DBuilder.createTemplateValueParameter(TheCU, ND->getName(), TTy,
- TA.getAsIntegral()->getZExtValue());
+ TA.getAsIntegral().getZExtValue());
TemplateParams.push_back(TVP);
}
}
@@ -1177,6 +1178,7 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
return T;
}
@@ -1185,6 +1187,7 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// debug info.
llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
SourceLocation Loc) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
DBuilder.retainType(T);
return T;
@@ -1287,7 +1290,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (!Def) {
llvm::DIType FwdDecl =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- ID->getName(), DefUnit, Line,
+ ID->getName(), TheCU, DefUnit, Line,
RuntimeLang);
return FwdDecl;
}
@@ -1385,8 +1388,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
    // the non-fragile ABI and the debugger should ignore the value anyway.
// Call it the FieldNo+1 due to how debuggers use the information,
// e.g. negating the value when it needs a lookup in the dynamic table.
- uint64_t FieldOffset = CGM.getLangOpts().ObjCNonFragileABI ? FieldNo+1
- : RL.getFieldOffset(FieldNo);
+ uint64_t FieldOffset = CGM.getLangOpts().ObjCRuntime.isNonFragile()
+ ? FieldNo+1 : RL.getFieldOffset(FieldNo);
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
@@ -1456,7 +1459,6 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
uint64_t Size;
uint64_t Align;
-
// FIXME: make getTypeAlign() aware of VLAs and incomplete array types
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
@@ -1464,7 +1466,10 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
- Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ if (Ty->getElementType()->isIncompleteType())
+ Align = 0;
+ else
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
} else if (Ty->isDependentSizedArrayType() || Ty->isIncompleteType()) {
Size = 0;
Align = 0;
@@ -1479,25 +1484,21 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// obvious/recursive way?
SmallVector<llvm::Value *, 8> Subscripts;
QualType EltTy(Ty, 0);
- if (Ty->isIncompleteArrayType())
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ int64_t UpperBound = 0;
+ int64_t LowerBound = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) {
+ if (CAT->getSize().getZExtValue())
+ UpperBound = CAT->getSize().getZExtValue() - 1;
+ } else
+ // This is an unbounded array. Use Low = 1, Hi = 0 to express such
+ // arrays.
+ LowerBound = 1;
+
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound,
+ UpperBound));
EltTy = Ty->getElementType();
- else {
- while ((Ty = dyn_cast<ArrayType>(EltTy))) {
- int64_t UpperBound = 0;
- int64_t LowerBound = 0;
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) {
- if (CAT->getSize().getZExtValue())
- UpperBound = CAT->getSize().getZExtValue() - 1;
- } else
- // This is an unbounded array. Use Low = 1, Hi = 0 to express such
- // arrays.
- LowerBound = 1;
-
- // FIXME: Verify this is right for VLAs.
- Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound,
- UpperBound));
- EltTy = Ty->getElementType();
- }
}
llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
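Concretely, the rewritten loop walks nested array types and emits one DWARF subrange per dimension: a constant bound of N becomes [0, N-1], and an unbounded dimension is encoded with lower bound 1 and upper bound 0. For example (illustrative only):

    int grid[4][8];          // two subranges: [0, 3] and [0, 7]
    extern int open_ended[]; // one subrange encoded as lo = 1, hi = 0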
@@ -1537,7 +1538,7 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
uint64_t FieldOffset = 0;
llvm::Value *ElementTypes[2];
- // FIXME: This should probably be a function type instead.
+ // FIXME: This should be a DW_TAG_pointer_to_member type.
ElementTypes[0] =
DBuilder.createMemberType(U, "ptr", U, 0,
Info.first, Info.second, FieldOffset, 0,
@@ -1565,7 +1566,6 @@ llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
/// CreateEnumType - get enumeration type.
llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
- llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
SmallVector<llvm::Value *, 16> Enumerators;
// Create DIEnumerator elements for each enumerator.
@@ -1590,9 +1590,13 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
}
llvm::DIDescriptor EnumContext =
getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIType ClassTy = ED->isScopedUsingClassTag() ?
+ getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
+ unsigned Flags = !ED->isCompleteDefinition() ? llvm::DIDescriptor::FlagFwdDecl : 0;
llvm::DIType DbgTy =
DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
- Size, Align, EltArray);
+ Size, Align, EltArray,
+ ClassTy, Flags);
return DbgTy;
}
@@ -1626,8 +1630,13 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
break;
- case Type::SubstTemplateTypeParm:
+ case Type::SubstTemplateTypeParm: {
+ // We need to keep the qualifiers handy since getReplacementType()
+ // will strip them away.
+ unsigned Quals = T.getLocalFastQualifiers();
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ T.addFastQualifiers(Quals);
+ }
break;
case Type::Auto:
T = cast<AutoType>(T)->getDeducedType();
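The qualifier bookkeeping is needed because getReplacementType() returns the bare substituted type. A minimal sketch of a case that would otherwise lose a qualifier in the debug type (illustrative only):

    template <typename T>
    int f(const T &param) {
      // Inside f<int>, 'const T' is a qualified SubstTemplateTypeParmType;
      // unwrapping to the replacement type (int) alone would drop the
      // 'const' from param's debug type.
      return param;
    }
    int main() { return f(0); }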
@@ -1686,23 +1695,26 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty);
-
+
llvm::DIType T = getCompletedTypeOrNull(Ty);
- if (T.Verify()) return T;
+ if (T.Verify())
+ return T;
// Otherwise create the type.
llvm::DIType Res = CreateTypeNode(Ty, Unit);
llvm::DIType TC = getTypeOrNull(Ty);
if (TC.Verify() && TC.isForwardDecl())
- ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), TC));
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(),
+ static_cast<llvm::Value*>(TC)));
// And update the type cache.
TypeCache[Ty.getAsOpaquePtr()] = Res;
if (!Res.isForwardDecl())
CompletedTypeCache[Ty.getAsOpaquePtr()] = Res;
+
return Res;
}
@@ -1807,7 +1819,8 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
if (T.Verify() && T.isForwardDecl())
- ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), T));
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(),
+ static_cast<llvm::Value*>(T)));
// And update the type cache.
TypeCache[Ty.getAsOpaquePtr()] = Res;
@@ -1824,7 +1837,7 @@ llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
StringRef RDName = RD->getName();
llvm::DIDescriptor RDContext;
- if (CGM.getCodeGenOpts().LimitDebugInfo)
+ if (CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo)
RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
else
RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
@@ -1953,6 +1966,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
QualType FnType,
llvm::DIFile F) {
+
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
@@ -2013,18 +2027,21 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
LinkageName = CGM.getMangledName(GD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
}
- if (LinkageName == Name)
+ if (LinkageName == Name ||
+ CGM.getCodeGenOpts().DebugInfo <= CodeGenOptions::DebugLineTablesOnly)
LinkageName = StringRef();
- if (const NamespaceDecl *NSDecl =
- dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
- FDContext = getOrCreateNameSpace(NSDecl);
- else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
- FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
- // Collect template parameters.
- TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ }
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
@@ -2040,14 +2057,27 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
- llvm::DISubprogram SP =
- DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
- LineNo, getOrCreateFunctionType(D, FnType, Unit),
- Fn->hasInternalLinkage(), true/*definition*/,
- getLineNumber(CurLoc),
- Flags, CGM.getLangOpts().Optimize, Fn,
- TParamsArray, SPDecl);
+ llvm::DIType DIFnType;
+ llvm::DISubprogram SPDecl;
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ DIFnType = getOrCreateFunctionType(D, FnType, Unit);
+ SPDecl = getFunctionDeclaration(D);
+ } else {
+    // Create a fake but valid subroutine type. Otherwise,
+    // llvm::DISubprogram::Verify() would return false, and the subprogram
+    // DIE would be missing its DW_AT_decl_file and DW_AT_decl_line fields.
+ SmallVector<llvm::Value*, 16> Elts;
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ DIFnType = DBuilder.createSubroutineType(Unit, EltTypeArray);
+ }
+ llvm::DISubprogram SP;
+ SP = DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
+ LineNo, DIFnType,
+ Fn->hasInternalLinkage(), true/*definition*/,
+ getLineNumber(CurLoc), Flags,
+ CGM.getLangOpts().Optimize,
+ Fn, TParamsArray, SPDecl);
// Push function on region stack.
llvm::MDNode *SPN = SP;
@@ -2205,6 +2235,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage,
unsigned ArgNo, CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
@@ -2224,14 +2255,14 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
  // If Storage is an aggregate returned as 'sret' then let the debugger know
// about this.
if (Arg->hasStructRetAttr())
- Ty = DBuilder.createReferenceType(Ty);
+ Ty = DBuilder.createReferenceType(llvm::dwarf::DW_TAG_reference_type, Ty);
else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
    // If an aggregate variable has a non-trivial destructor or a non-trivial
    // copy constructor, then it is passed indirectly. Let debug info know
    // about this by using a reference of the aggregate type as the argument
    // type.
if (!Record->hasTrivialCopyConstructor() ||
!Record->hasTrivialDestructor())
- Ty = DBuilder.createReferenceType(Ty);
+ Ty = DBuilder.createReferenceType(llvm::dwarf::DW_TAG_reference_type, Ty);
}
}
@@ -2272,8 +2303,25 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
- }
+ } else if (isa<VariableArrayType>(VD->getType())) {
+ // These are "complex" variables in that they need an op_deref.
// Create the descriptor for the variable.
+ llvm::Value *Addr = llvm::ConstantInt::get(CGM.Int64Ty,
+ llvm::DIBuilder::OpDeref);
+ llvm::DIVariable D =
+ DBuilder.createComplexVariable(Tag,
+ llvm::DIDescriptor(Scope),
+ Name, Unit, Line, Ty,
+ Addr, ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+
+ // Create the descriptor for the variable.
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
Name, Unit, Line, Ty,
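The new branch covers variable-length arrays, whose storage sits behind a pointer, so the recorded location needs one extra dereference (OpDeref). A minimal sketch of such a variable; VLAs are a C99 feature that clang also accepts in C++ as a GNU extension:

    void fill(int n) {
      // 'vla' is a "complex" variable: the debugger must apply OpDeref to
      // the recorded address to reach the array elements.
      int vla[n];
      for (int i = 0; i != n; ++i)
        vla[i] = i;
    }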
@@ -2321,12 +2369,14 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
const CGBlockInfo &blockInfo) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
@@ -2387,6 +2437,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
unsigned ArgNo,
CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
@@ -2403,6 +2454,7 @@ namespace {
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *addr,
CGBuilderTy &Builder) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
ASTContext &C = CGM.getContext();
const BlockDecl *blockDecl = block.getBlockDecl();
@@ -2547,6 +2599,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2557,9 +2610,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
if (T->isIncompleteArrayType()) {
// CodeGen turns int[] into int[1] so we'll do the same here.
- llvm::APSInt ConstVal(32);
-
- ConstVal = 1;
+ llvm::APInt ConstVal(32, 1);
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
@@ -2582,6 +2633,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit information about an objective-c interface.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *ID) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
unsigned LineNo = getLineNumber(ID->getLocation());
@@ -2592,9 +2644,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
if (T->isIncompleteArrayType()) {
// CodeGen turns int[] into int[1] so we'll do the same here.
- llvm::APSInt ConstVal(32);
-
- ConstVal = 1;
+ llvm::APInt ConstVal(32, 1);
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
@@ -2609,13 +2659,15 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
/// EmitGlobalVariable - Emit global variable's debug info.
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::Constant *Init) {
+ assert(CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo);
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
- if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
- Ty = CreateEnumType(ED);
+ const EnumDecl *ED = cast<EnumDecl>(ECD->getDeclContext());
+ assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
+ Ty = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
}
// Do not use DIGlobalVariable for enums.
if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index ec7705c..44cc49a 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -17,9 +17,9 @@
#include "clang/AST/Type.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/DebugInfo.h"
+#include "llvm/DIBuilder.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Allocator.h"
@@ -30,6 +30,7 @@ namespace llvm {
}
namespace clang {
+ class CXXMethodDecl;
class VarDecl;
class ObjCInterfaceDecl;
class ClassTemplateSpecializationDecl;
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 6447779..be6638e 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -188,11 +188,15 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
CGM.EmitNullConstant(D.getType()), Name, 0,
- D.isThreadSpecified(),
+ llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(Ty));
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (Linkage != llvm::GlobalValue::InternalLinkage)
GV->setVisibility(CurFn->getVisibility());
+
+ if (D.isThreadSpecified())
+ CGM.setTLSMode(GV, D);
+
return GV;
}
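Routing the variable through CGM.setTLSMode lets the thread-local model come from -ftls-model or the tls_model attribute instead of a hard-coded constructor flag. A minimal sketch of an affected declaration (illustrative only):

    int next_id() {
      static __thread int counter; // function-local TLS: emitted as a global
      return ++counter;            // whose thread-local mode is chosen via
    }                              // CGM.setTLSMode rather than a bool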
@@ -239,7 +243,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
/*InsertBefore*/ OldGV,
- D.isThreadSpecified(),
+ OldGV->getThreadLocalMode(),
CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
@@ -326,7 +330,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
+ if (DI &&
+ CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitGlobalVariable(var, &D);
}
@@ -489,6 +494,14 @@ static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
return (ref->getDecl() == &var);
+ if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
+ const BlockDecl *block = be->getBlockDecl();
+ for (BlockDecl::capture_const_iterator i = block->capture_begin(),
+ e = block->capture_end(); i != e; ++i) {
+ if (i->getVariable() == &var)
+ return true;
+ }
+ }
}
for (Stmt::const_child_range children = s->children(); children; ++children)
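The added loop teaches isAccessedBy to see accesses made through a block's capture list, not only through a direct DeclRefExpr. A minimal sketch, assuming clang's -fblocks extension:

    typedef void (^Block)(void);

    void g() {
      // 'b' is referenced by its own initializer, but only via the block's
      // captures, which the plain DeclRefExpr check would miss.
      __block Block b = ^{ (void)b; };
      b();
    }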
@@ -897,11 +910,14 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Emit debug info for local var declaration.
if (HaveInsertPoint())
if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D.getLocation());
- if (Target.useGlobalsForAutomaticVariables()) {
- DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
- } else
- DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr),
+ &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
}
if (D.hasAttr<AnnotateAttr>())
@@ -1054,7 +1070,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
- constant, Name, 0, false, 0);
+ constant, Name);
GV->setAlignment(alignment.getQuantity());
GV->setUnnamedAddr(true);
@@ -1477,8 +1493,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
LocalDeclMap[&D] = Arg;
if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ if (CGM.getCodeGenOpts().DebugInfo >=
+ CodeGenOptions::LimitedDebugInfo) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ }
}
return;
@@ -1556,8 +1575,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DMEntry = DeclPtr;
// Emit debug info for param declaration.
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo) {
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+ }
+ }
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, DeclPtr);
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 10f0b83..492b95a 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -98,7 +98,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
- CGF.EmitCXXGlobalDtorRegistration(function, argument);
+ CGM.getCXXABI().registerGlobalDtor(CGF, function, argument);
}
/// Emit code to cause the variable at the given address to be considered as
@@ -145,39 +145,6 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
-/// Register a global destructor using __cxa_atexit.
-static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
- // We're assuming that the destructor function is something we can
- // reasonably call with the default CC. Go ahead and cast it to the
- // right prototype.
- llvm::Type *dtorTy =
- llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
-
- // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
- llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
- llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(CGF.IntTy, paramTys, false);
-
- // Fetch the actual function.
- llvm::Constant *atexit =
- CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
- if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
- fn->setDoesNotThrow();
-
- // Create a variable that binds the atexit to this shared object.
- llvm::Constant *handle =
- CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
-
- llvm::Value *args[] = {
- llvm::ConstantExpr::getBitCast(dtor, dtorTy),
- llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
- handle
- };
- CGF.Builder.CreateCall(atexit, args);
-}
-
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::FunctionType *ty,
@@ -212,43 +179,22 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
return fn;
}
-/// Register a global destructor using atexit.
-static void emitGlobalDtorWithAtExit(CodeGenFunction &CGF,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
+/// Register a global destructor using the C atexit runtime function.
+void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtor,
+ llvm::Constant *addr) {
// Create a function which calls the destructor.
- llvm::Constant *dtorStub = createAtExitStub(CGF.CGM, dtor, addr);
+ llvm::Constant *dtorStub = createAtExitStub(CGM, dtor, addr);
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
- llvm::FunctionType::get(CGF.IntTy, dtorStub->getType(), false);
+ llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::Constant *atexit =
- CGF.CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ CGM.CreateRuntimeFunction(atexitTy, "atexit");
if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
atexitFn->setDoesNotThrow();
- CGF.Builder.CreateCall(atexit, dtorStub);
-}
-
-void CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *dtor,
- llvm::Constant *addr) {
- // Use __cxa_atexit if available.
- if (CGM.getCodeGenOpts().CXAAtExit) {
- emitGlobalDtorWithCXAAtExit(*this, dtor, addr);
- return;
- }
-
- // In Apple kexts, we want to add a global destructor entry.
- // FIXME: shouldn't this be guarded by some variable?
- if (CGM.getContext().getLangOpts().AppleKext) {
- // Generate a global destructor entry.
- CGM.AddCXXDtorEntry(dtor, addr);
- return;
- }
-
- // Otherwise, we just use atexit.
- emitGlobalDtorWithAtExit(*this, dtor, addr);
+ Builder.CreateCall(atexit, dtorStub)->setDoesNotThrow();
}
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
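registerGlobalDtorWithAtExit is the fallback when __cxa_atexit is not used: a small stub runs the destructor and is handed to plain atexit. The moral equivalent in source form (a sketch of the mechanism, not the emitted IR):

    #include <cstdlib>

    struct Resource { ~Resource() {} };
    static Resource *res;

    static void res_dtor_stub() { delete res; } // stands in for the stub
                                                // built by createAtExitStub
    int main() {
      res = new Resource;
      std::atexit(res_dtor_stub); // the registration the patch now funnels
      return 0;                   // through registerGlobalDtorWithAtExit
    }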
@@ -282,6 +228,9 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
if (!CGM.getLangOpts().Exceptions)
Fn->setDoesNotThrow();
+ if (CGM.getLangOpts().AddressSanitizer)
+ Fn->addFnAttr(llvm::Attribute::AddressSafety);
+
return Fn;
}
@@ -372,9 +321,12 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ if (CGM.getModuleDebugInfo() && !D->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
- FunctionArgList(), SourceLocation());
+ FunctionArgList(), D->getInit()->getExprLoc());
// Use guarded initialization if the global variable is weak. This
// occurs for, e.g., instantiated static data members and
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 95e0030..ba9c296 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -126,7 +126,7 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
if (CGF.getLangOpts().CPlusPlus)
name = "_ZSt9terminatev"; // FIXME: mangling!
else if (CGF.getLangOpts().ObjC1 &&
- CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
+ CGF.getLangOpts().ObjCRuntime.hasTerminate())
name = "objc_terminate";
else
name = "abort";
@@ -180,12 +180,18 @@ static const EHPersonality &getCPersonality(const LangOptions &L) {
}
static const EHPersonality &getObjCPersonality(const LangOptions &L) {
- if (L.NeXTRuntime) {
- if (L.ObjCNonFragileABI) return EHPersonality::NeXT_ObjC;
- else return getCPersonality(L);
- } else {
+ switch (L.ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
+ return getCPersonality(L);
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ return EHPersonality::NeXT_ObjC;
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
return EHPersonality::GNU_ObjC;
}
+ llvm_unreachable("bad runtime kind");
}
static const EHPersonality &getCXXPersonality(const LangOptions &L) {
@@ -198,22 +204,28 @@ static const EHPersonality &getCXXPersonality(const LangOptions &L) {
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+ switch (L.ObjCRuntime.getKind()) {
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
// function on targets using (backend-driven) SJLJ EH.
- if (L.NeXTRuntime) {
- if (L.ObjCNonFragileABI)
- return EHPersonality::NeXT_ObjC;
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ return EHPersonality::NeXT_ObjC;
- // In the fragile ABI, just use C++ exception handling and hope
- // they're not doing crazy exception mixing.
- else
- return getCXXPersonality(L);
- }
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ case ObjCRuntime::FragileMacOSX:
+ return getCXXPersonality(L);
- // The GNU runtime's personality function inherently doesn't support
+ // The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
- return EHPersonality::GNU_ObjCXX;
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW: // XXX: this will change soon
+ return EHPersonality::GNU_ObjC;
+ case ObjCRuntime::GNUstep:
+ return EHPersonality::GNU_ObjCXX;
+ }
+ llvm_unreachable("bad runtime kind");
}
const EHPersonality &EHPersonality::get(const LangOptions &L) {
@@ -1127,14 +1139,6 @@ static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
CGF.EmitAutoVarCleanups(var);
}
-namespace {
- struct CallRethrow : EHScopeStack::Cleanup {
- void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitCallOrInvoke(getReThrowFn(CGF));
- }
- };
-}
-
/// Emit the structure of the dispatch block for the given catch scope.
/// It is an invariant that the dispatch block already exists.
static void emitCatchDispatchBlock(CodeGenFunction &CGF,
@@ -1246,11 +1250,12 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
if (HaveInsertPoint())
Builder.CreateBr(ContBB);
- // Determine if we need an implicit rethrow for all these catch handlers.
- bool ImplicitRethrow = false;
+ // Determine if we need an implicit rethrow for all these catch handlers;
+ // see the comment below.
+ bool doImplicitRethrow = false;
if (IsFnTryBlock)
- ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
- isa<CXXConstructorDecl>(CurCodeDecl);
+ doImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
// Perversely, we emit the handlers backwards precisely because we
// want them to appear in source order. In all of these cases, the
@@ -1273,15 +1278,24 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// Initialize the catch variable and set up the cleanups.
BeginCatch(*this, C);
- // If there's an implicit rethrow, push a normal "cleanup" to call
- // _cxa_rethrow. This needs to happen before __cxa_end_catch is
- // called, and so it is pushed after BeginCatch.
- if (ImplicitRethrow)
- EHStack.pushCleanup<CallRethrow>(NormalCleanup);
-
// Perform the body of the catch.
EmitStmt(C->getHandlerBlock());
+ // [except.handle]p11:
+ // The currently handled exception is rethrown if control
+ // reaches the end of a handler of the function-try-block of a
+ // constructor or destructor.
+
+ // It is important that we only do this on fallthrough and not on
+ // return. Note that it's illegal to put a return in a
+ // constructor function-try-block's catch handler (p14), so this
+ // really only applies to destructors.
+ if (doImplicitRethrow && HaveInsertPoint()) {
+ EmitCallOrInvoke(getReThrowFn(*this));
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ }
+
// Fall out through the catch cleanups.
CatchScope.ForceCleanup();
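The rewritten epilogue implements [except.handle]p11 literally: control reaching the end of a handler in a constructor's or destructor's function-try-block rethrows the current exception, and only on fallthrough. A minimal sketch (illustrative only; the noexcept(false) is needed under C++11, where destructors default to non-throwing):

    struct Member { ~Member() noexcept(false) { throw 1; } };

    struct Holder {
      Member m;
      ~Holder() try {
      } catch (int) {
        // Falling off the end of this handler rethrows the int; the patch
        // emits that rethrow followed by an 'unreachable' marker.
      }
    };

    int main() {
      try { Holder h; } catch (int) { return 0; }
      return 1;
    }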
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 5f2b1f0..ecee7b4 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -21,10 +21,11 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/ConvertUTF.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
-#include "llvm/Support/MDBuilder.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -108,15 +109,18 @@ void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
-RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
- bool IgnoreResult) {
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
+ AggValueSlot aggSlot,
+ bool ignoreResult) {
if (!hasAggregateLLVMType(E->getType()))
- return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ return RValue::get(EmitScalarExpr(E, ignoreResult));
else if (E->getType()->isAnyComplexType())
- return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
+ return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
- EmitAggExpr(E, AggSlot, IgnoreResult);
- return AggSlot.asRValue();
+ if (!ignoreResult && aggSlot.isIgnored())
+ aggSlot = CreateAggTemp(E->getType(), "agg-temp");
+ EmitAggExpr(E, aggSlot);
+ return aggSlot.asRValue();
}
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
@@ -156,7 +160,11 @@ namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
struct SubobjectAdjustment {
- enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+ enum {
+ DerivedToBaseAdjustment,
+ FieldAdjustment,
+ MemberPointerAdjustment
+ } Kind;
union {
struct {
@@ -165,6 +173,11 @@ namespace {
} DerivedToBase;
FieldDecl *Field;
+
+ struct {
+ const MemberPointerType *MPT;
+ llvm::Value *Ptr;
+ } Ptr;
};
SubobjectAdjustment(const CastExpr *BasePath,
@@ -178,6 +191,12 @@ namespace {
: Kind(FieldAdjustment) {
this->Field = Field;
}
+
+ SubobjectAdjustment(const MemberPointerType *MPT, llvm::Value *Ptr)
+ : Kind(MemberPointerAdjustment) {
+ this->Ptr.MPT = MPT;
+ this->Ptr.Ptr = Ptr;
+ }
};
}
@@ -345,6 +364,15 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
continue;
}
}
+ } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->isPtrMemOp()) {
+ assert(BO->getLHS()->isRValue());
+ E = BO->getLHS();
+ const MemberPointerType *MPT =
+ BO->getRHS()->getType()->getAs<MemberPointerType>();
+ llvm::Value *Ptr = CGF.EmitScalarExpr(BO->getRHS());
+ Adjustments.push_back(SubobjectAdjustment(MPT, Ptr));
+ }
}
if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
@@ -417,6 +445,11 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
break;
}
+ case SubobjectAdjustment::MemberPointerAdjustment: {
+ Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
+ CGF, Object, Adjustment.Ptr.Ptr, Adjustment.Ptr.MPT);
+ break;
+ }
}
}
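The new MemberPointerAdjustment covers binding a reference to the result of '.*' on a temporary, which requires offsetting the temporary's address by the member pointer. A minimal sketch (illustrative only):

    struct S { int field; };
    S make() { S s; s.field = 7; return s; }

    int main() {
      int S::*pm = &S::field;
      const int &r = make().*pm; // bind into the materialized temporary at
      return r - 7;              // the address adjusted by pm; returns 0
    }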
@@ -462,7 +495,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
if (ReferenceTemporaryDtor) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
- EmitCXXGlobalDtorRegistration(DtorFn,
+ CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
cast<llvm::Constant>(ReferenceTemporary));
} else {
assert(!ObjCARCReferenceLifetimeType.isNull());
@@ -525,15 +558,9 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
- // In time, people may want to control this and use a 1 here.
- llvm::Value *Arg = Builder.getFalse();
- llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
+ llvm::Value *Min = Builder.getFalse();
+ llvm::Value *C = Builder.CreateCall2(F, Address, Min);
llvm::BasicBlock *Cont = createBasicBlock();
- llvm::BasicBlock *Check = createBasicBlock();
- llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
- Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
-
- EmitBlock(Check);
Builder.CreateCondBr(Builder.CreateICmpUGE(C,
llvm::ConstantInt::get(IntPtrTy, Size)),
Cont, getTrapBB());
@@ -676,10 +703,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::PseudoObjectExprClass:
return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
case Expr::InitListExprClass:
- assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
- "Only single-element init list can be lvalue.");
- return EmitLValue(cast<InitListExpr>(E)->getInit(0));
-
+ return EmitInitListLValue(cast<InitListExpr>(E));
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXConstructExprClass:
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
@@ -880,7 +904,6 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
CGM.getCodeGenOpts().StrictEnums &&
!ET->getDecl()->isFixed());
bool IsBool = hasBooleanRepresentation(Ty);
- llvm::Type *LTy;
if (!IsBool && !IsRegularCPlusPlusEnum)
return NULL;
@@ -889,10 +912,9 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
if (IsBool) {
Min = llvm::APInt(8, 0);
End = llvm::APInt(8, 2);
- LTy = Int8Ty;
} else {
const EnumDecl *ED = ET->getDecl();
- LTy = ConvertTypeForMem(ED->getIntegerType());
+ llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
unsigned Bitwidth = LTy->getScalarSizeInBits();
unsigned NumNegativeBits = ED->getNumNegativeBits();
unsigned NumPositiveBits = ED->getNumPositiveBits();
@@ -1028,6 +1050,9 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
llvm::Value *Res = 0;
for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+ CharUnits AccessAlignment = AI.AccessAlignment;
+ if (!LV.getAlignment().isZero())
+ AccessAlignment = std::min(AccessAlignment, LV.getAlignment());
// Get the field pointer.
llvm::Value *Ptr = LV.getBitFieldBaseAddr();
@@ -1051,8 +1076,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// Perform the load.
llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
- if (!AI.AccessAlignment.isZero())
- Load->setAlignment(AI.AccessAlignment.getQuantity());
+ Load->setAlignment(AccessAlignment.getQuantity());
// Shift out unused low bits and mask out unused high bits.
llvm::Value *Val = Load;
@@ -1251,6 +1275,9 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Iterate over the components, writing each piece to memory.
for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+ CharUnits AccessAlignment = AI.AccessAlignment;
+ if (!Dst.getAlignment().isZero())
+ AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());
// Get the field pointer.
llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
@@ -1297,8 +1324,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// If necessary, load and OR in bits that are outside of the bit-field.
if (AI.TargetBitWidth != AI.AccessWidth) {
llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
- if (!AI.AccessAlignment.isZero())
- Load->setAlignment(AI.AccessAlignment.getQuantity());
+ Load->setAlignment(AccessAlignment.getQuantity());
// Compute the mask for zeroing the bits that are part of the bit-field.
llvm::APInt InvMask =
@@ -1312,8 +1338,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Write the value.
llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
Dst.isVolatileQualified());
- if (!AI.AccessAlignment.isZero())
- Store->setAlignment(AI.AccessAlignment.getQuantity());
+ Store->setAlignment(AccessAlignment.getQuantity());
}
}
@@ -1683,6 +1708,39 @@ LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
E->getType());
}
+static llvm::Constant*
+GetAddrOfConstantWideString(StringRef Str,
+ const char *GlobalName,
+ ASTContext &Context,
+ QualType Ty, SourceLocation Loc,
+ CodeGenModule &CGM) {
+
+ StringLiteral *SL = StringLiteral::Create(Context,
+ Str,
+ StringLiteral::Wide,
+ /*Pascal = */false,
+ Ty, Loc);
+ llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ !CGM.getLangOpts().WritableStrings,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+ const unsigned WideAlignment =
+ Context.getTypeAlignInChars(Ty).getQuantity();
+ GV->setAlignment(WideAlignment);
+ return GV;
+}
+
+static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
+ SmallString<32>& Target) {
+ Target.resize(CharByteWidth * (Source.size() + 1));
+ char* ResultPtr = &Target[0];
+ bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr);
+ (void)success;
+ assert(success);
+ Target.resize(ResultPtr - &Target[0]);
+}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
switch (E->getIdentType()) {
@@ -1691,11 +1749,12 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
case PredefinedExpr::Func:
case PredefinedExpr::Function:
+ case PredefinedExpr::LFunction:
case PredefinedExpr::PrettyFunction: {
- unsigned Type = E->getIdentType();
+ unsigned IdentType = E->getIdentType();
std::string GlobalVarName;
- switch (Type) {
+ switch (IdentType) {
default: llvm_unreachable("Invalid type");
case PredefinedExpr::Func:
GlobalVarName = "__func__.";
@@ -1703,6 +1762,9 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
case PredefinedExpr::Function:
GlobalVarName = "__FUNCTION__.";
break;
+ case PredefinedExpr::LFunction:
+ GlobalVarName = "L__FUNCTION__.";
+ break;
case PredefinedExpr::PrettyFunction:
GlobalVarName = "__PRETTY_FUNCTION__.";
break;
@@ -1720,10 +1782,27 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
std::string FunctionName =
(isa<BlockDecl>(CurDecl)
? FnName.str()
- : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
-
- llvm::Constant *C =
- CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
+ CurDecl));
+
+ const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
+ llvm::Constant *C;
+ if (ElemType->isWideCharType()) {
+ SmallString<32> RawChars;
+ ConvertUTF8ToWideString(
+ getContext().getTypeSizeInChars(ElemType).getQuantity(),
+ FunctionName, RawChars);
+ C = GetAddrOfConstantWideString(RawChars,
+ GlobalVarName.c_str(),
+ getContext(),
+ E->getType(),
+ E->getLocation(),
+ CGM);
+ } else {
+ C = CGM.GetAddrOfConstantCString(FunctionName,
+ GlobalVarName.c_str(),
+ 1);
+ }
return MakeAddrLValue(C, E->getType());
}
}
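The wide-string helpers above exist because not every predefined identifier is narrow: under Microsoft extensions, L__FUNCTION__ is a wide literal and cannot go through GetAddrOfConstantCString. A minimal sketch, assuming -fms-extensions:

    const wchar_t *trace_name() {
      return L__FUNCTION__; // MS-mode predefined wide identifier, emitted
    }                       // through the new wide-string path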
@@ -1794,25 +1873,6 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Extend or truncate the index type to 32 or 64-bits.
if (Idx->getType() != IntPtrTy)
Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
-
- // FIXME: As llvm implements the object size checking, this can come out.
- if (CatchUndefined) {
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
- if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
- if (const ConstantArrayType *CAT
- = getContext().getAsConstantArrayType(DRE->getType())) {
- llvm::APInt Size = CAT->getSize();
- llvm::BasicBlock *Cont = createBasicBlock("cont");
- Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
- llvm::ConstantInt::get(Idx->getType(), Size)),
- Cont, getTrapBB());
- EmitBlock(Cont);
- }
- }
- }
- }
- }
// We know that the pointer points to a type of the correct size, unless the
// size is a VLA or Objective-C interface.
@@ -1996,43 +2056,17 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
llvm_unreachable("Unhandled member declaration!");
}
-LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
- const FieldDecl *Field,
- unsigned CVRQualifiers) {
- const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(Field->getParent());
- const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
- return LValue::MakeBitfield(BaseValue, Info,
- Field->getType().withCVRQualifiers(CVRQualifiers));
-}
-
-/// EmitLValueForAnonRecordField - Given that the field is a member of
-/// an anonymous struct or union buried inside a record, and given
-/// that the base value is a pointer to the enclosing record, derive
-/// an lvalue for the ultimate field.
-LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
- const IndirectFieldDecl *Field,
- unsigned CVRQualifiers) {
- IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
- IEnd = Field->chain_end();
- while (true) {
- QualType RecordTy =
- getContext().getTypeDeclType(cast<FieldDecl>(*I)->getParent());
- LValue LV = EmitLValueForField(MakeAddrLValue(BaseValue, RecordTy),
- cast<FieldDecl>(*I));
- if (++I == IEnd) return LV;
-
- assert(LV.isSimple());
- BaseValue = LV.getAddress();
- CVRQualifiers |= LV.getVRQualifiers();
- }
-}
-
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
- if (field->isBitField())
- return EmitLValueForBitfield(base.getAddress(), field,
- base.getVRQualifiers());
+ if (field->isBitField()) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
+ QualType fieldType =
+ field->getType().withCVRQualifiers(base.getVRQualifiers());
+ return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
+ base.getAlignment());
+ }
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
@@ -2144,7 +2178,10 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
return MakeAddrLValue(GlobalPtr, E->getType());
}
-
+ if (E->getType()->isVariablyModifiedType())
+ // make sure to emit the VLA size.
+ EmitVariablyModifiedType(E->getType());
+
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
@@ -2155,6 +2192,16 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
return Result;
}
+LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
+ if (!E->isGLValue())
+ // Initializing an aggregate temporary in C++11: T{...}.
+ return EmitAggExprToLValue(E);
+
+ // An lvalue initializer list must be initializing a reference.
+ assert(E->getNumInits() == 1 && "reference init with multiple values");
+ return EmitLValue(E->getInit(0));
+}
+
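EmitInitListLValue separates the two ways an InitListExpr reaches lvalue emission. A minimal C++11 sketch (illustrative only):

    struct Pair { int a, b; };

    int main() {
      const int &ref = {42}; // glvalue list: a one-element reference init,
                             // forwarded to EmitLValue on that element
      int x = Pair{1, 2}.a;  // prvalue list: C++11 aggregate temporary,
                             // emitted via EmitAggExprToLValue
      return x + ref - 43;   // 0
    }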
LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
if (!expr->isGLValue()) {
@@ -2214,11 +2261,11 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return MakeAddrLValue(phi, expr->getType());
}
-/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
-/// If the cast is a dynamic_cast, we can have the usual lvalue result,
+/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
+/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
-/// access one of its fields. This can happen for all the reasons that casts
+/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
@@ -2648,7 +2695,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeFunctionCall(Args, FnType);
+ CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -3038,7 +3085,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().IntTy);
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
FunctionType::ExtInfo(), RequiredArgs::All);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 7b0e0f5..61f7362 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -34,7 +34,6 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
AggValueSlot Dest;
- bool IgnoreResult;
/// We want to use 'dest' as the return slot except under two
/// conditions:
@@ -56,12 +55,14 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
if (!Dest.isIgnored()) return Dest;
return CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
+ void EnsureDest(QualType T) {
+ if (!Dest.isIgnored()) return;
+ Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
+ }
public:
- AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
- bool ignore)
- : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
- IgnoreResult(ignore) {
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
}
//===--------------------------------------------------------------------===//
@@ -74,9 +75,11 @@ public:
void EmitAggLoadOfLValue(const Expr *E);
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
- void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
- void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
- unsigned Alignment = 0);
+ void EmitFinalDestCopy(QualType type, const LValue &src);
+ void EmitFinalDestCopy(QualType type, RValue src,
+ CharUnits srcAlignment = CharUnits::Zero());
+ void EmitCopy(QualType type, const AggValueSlot &dest,
+ const AggValueSlot &src);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
@@ -119,7 +122,7 @@ public:
if (E->getDecl()->getType()->isReferenceType()) {
if (CodeGenFunction::ConstantEmission result
= CGF.tryEmitAsConstant(E)) {
- EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+ EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
return;
}
}
@@ -171,7 +174,7 @@ public:
void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
if (E->isGLValue()) {
LValue LV = CGF.EmitPseudoObjectLValue(E);
- return EmitFinalDestCopy(E, LV);
+ return EmitFinalDestCopy(E->getType(), LV);
}
CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
@@ -198,7 +201,7 @@ public:
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
LValue LV = CGF.EmitLValue(E);
- EmitFinalDestCopy(E, LV);
+ EmitFinalDestCopy(E->getType(), LV);
}
/// \brief True if the given aggregate type requires special GC API calls.
@@ -228,7 +231,7 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot. Otherwise, a final move
/// will be performed.
-void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
if (shouldUseDestForReturnSlot()) {
// Logically, Dest.getAddr() should equal Src.getAggregateAddr().
// The possibility of undef rvalues complicates that a lot,
@@ -236,61 +239,58 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
return;
}
- // Otherwise, do a final copy,
- assert(Dest.getAddr() != Src.getAggregateAddr());
- std::pair<CharUnits, CharUnits> TypeInfo =
+ // Otherwise, copy from there to the destination.
+ assert(Dest.getAddr() != src.getAggregateAddr());
+ std::pair<CharUnits, CharUnits> typeInfo =
CGF.getContext().getTypeInfoInChars(E->getType());
- CharUnits Alignment = std::min(TypeInfo.second, Dest.getAlignment());
- EmitFinalDestCopy(E, Src, /*Ignore*/ true, Alignment.getQuantity());
+ EmitFinalDestCopy(E->getType(), src, typeInfo.second);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
- unsigned Alignment) {
- assert(Src.isAggregate() && "value must be aggregate value!");
+void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
+ CharUnits srcAlign) {
+ assert(src.isAggregate() && "value must be aggregate value!");
+ LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
+ EmitFinalDestCopy(type, srcLV);
+}
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
// If Dest is ignored, then we're evaluating an aggregate expression
- // in a context (like an expression statement) that doesn't care
- // about the result. C says that an lvalue-to-rvalue conversion is
- // performed in these cases; C++ says that it is not. In either
- // case, we don't actually need to do anything unless the value is
- // volatile.
- if (Dest.isIgnored()) {
- if (!Src.isVolatileQualified() ||
- CGF.CGM.getLangOpts().CPlusPlus ||
- (IgnoreResult && Ignore))
- return;
+ // in a context that doesn't care about the result. Note that loads
+ // from volatile l-values force the existence of a non-ignored
+ // destination.
+ if (Dest.isIgnored())
+ return;
- // If the source is volatile, we must read from it; to do that, we need
- // some place to put it.
- Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
- }
+ AggValueSlot srcAgg =
+ AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
+ needsGC(type), AggValueSlot::IsAliased);
+ EmitCopy(type, Dest, srcAgg);
+}
- if (Dest.requiresGCollection()) {
- CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+/// Perform a copy from the source into the destination.
+///
+/// \param type - the type of the aggregate being copied; qualifiers are
+/// ignored
+void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
+ const AggValueSlot &src) {
+ if (dest.requiresGCollection()) {
+ CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
+ llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
- Dest.getAddr(),
- Src.getAggregateAddr(),
- SizeVal);
+ dest.getAddr(),
+ src.getAddr(),
+ size);
return;
}
- // If the result of the assignment is used, copy the LHS there also.
- // FIXME: Pass VolatileDest as well. I think we also need to merge volatile
- // from the source as well, as we can't eliminate it if either operand
- // is volatile, unless copy has volatile for both source and destination..
- CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
- Dest.isVolatile()|Src.isVolatileQualified(),
- Alignment);
-}
-/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
- assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
-
- CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
- EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+ // If the result of the assignment is used, copy the LHS there also.
+ // It's volatile if either side is. Use the minimum alignment of
+ // the two sides.
+ CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
+ dest.isVolatile() || src.isVolatile(),
+ std::min(dest.getAlignment(), src.getAlignment()));
}
static QualType GetStdInitializerListElementType(QualType T) {
@@ -526,7 +526,7 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
}
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
- EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+ EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}
void
@@ -582,7 +582,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
"should have been unpacked before we got here");
}
- case CK_LValueToRValue: // hope for downstream optimization
+ case CK_LValueToRValue:
+ // If we're loading from a volatile type, force the destination
+ // into existence.
+ if (E->getSubExpr()->getType().isVolatileQualified()) {
+ EnsureDest(E->getType());
+ return Visit(E->getSubExpr());
+ }
+ // fallthrough
+
case CK_NoOp:
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
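An illustrative input (not from the patch) for the new CK_LValueToRValue case: a discarded read of a volatile aggregate, which must still perform the load (required in C mode; C++ semantics are murkier), so EnsureDest conjures a temporary for it.

    struct Pair { int a, b; };
    volatile Pair g;
    void touch() {
      g;  // volatile aggregate load into a forced, otherwise-ignored temporary
    }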
@@ -676,7 +684,73 @@ void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
const BinaryOperator *E) {
LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
- EmitFinalDestCopy(E, LV);
+ EmitFinalDestCopy(E->getType(), LV);
+}
+
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *E) {
+ // Make sure we look through parens.
+ E = E->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op
+ = dyn_cast<AbstractConditionalOperator>(E)) {
+ return isBlockVarRef(op->getTrueExpr())
+ || isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op
+ = dyn_cast<OpaqueValueExpr>(E)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
}
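A sketch of the hazard isBlockVarRef exists to detect (illustrative; uses Clang's blocks extension under -fblocks, and 'consume' is a hypothetical helper): evaluating the RHS can copy a block that captures 'agg' and migrate the variable to the heap, so the assignment below must evaluate the RHS before taking the LHS address.

    struct Agg { int field; };
    Agg consume(Agg (^blk)());        // may Block_copy the block, moving 'agg'
    void test() {
      __block Agg agg = {1};
      agg = consume(^{ return agg; });  // RHS first; LHS address computed after
    }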
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
@@ -686,20 +760,26 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
E->getRHS()->getType())
&& "Invalid assignment");
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
- if (VD->hasAttr<BlocksAttr>() &&
- E->getRHS()->HasSideEffects(CGF.getContext())) {
- // When __block variable on LHS, the RHS must be evaluated first
- // as it may change the 'forwarding' field via call to Block_copy.
- LValue RHS = CGF.EmitLValue(E->getRHS());
- LValue LHS = CGF.EmitLValue(E->getLHS());
- Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
- needsGC(E->getLHS()->getType()),
- AggValueSlot::IsAliased);
- EmitFinalDestCopy(E, RHS, true);
- return;
- }
+ // If the LHS might be a __block variable, and the RHS can
+ // potentially cause a block copy, we need to evaluate the RHS first
+ // so that the assignment goes to the right place.
+ // This is pretty semantically fragile.
+ if (isBlockVarRef(E->getLHS()) &&
+ E->getRHS()->HasSideEffects(CGF.getContext())) {
+ // Ensure that we have a destination, and evaluate the RHS into that.
+ EnsureDest(E->getRHS()->getType());
+ Visit(E->getRHS());
+
+ // Now emit the LHS and copy into it.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ EmitCopy(E->getLHS()->getType(),
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased),
+ Dest);
+ return;
+ }
LValue LHS = CGF.EmitLValue(E->getLHS());
@@ -708,8 +788,10 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased);
- CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
- EmitFinalDestCopy(E, LHS, true);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ EmitFinalDestCopy(E->getType(), LHS);
}
void AggExprEmitter::
@@ -762,14 +844,14 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
return;
}
- EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+ EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Ensure that we have a slot, but if we already do, remember
// whether it was externally destructed.
bool wasExternallyDestructed = Dest.isExternallyDestructed();
- Dest = EnsureSlot(E->getType());
+ EnsureDest(E->getType());
// We're going to push a destructor if there isn't already one.
Dest.setExternallyDestructed();
@@ -904,7 +986,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::GlobalVariable* GV =
new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage, C, "");
- EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
+ EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
return;
}
#endif
@@ -1164,11 +1246,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
/// type. The result is computed into DestPtr. Note that if DestPtr is null,
/// the value of the aggregate expression is not needed. If VolatileDest is
/// true, DestPtr cannot be 0.
-///
-/// \param IsInitializer - true if this evaluation is initializing an
-/// object whose lifetime is already being managed.
-void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
- bool IgnoreResult) {
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
assert(E && hasAggregateLLVMType(E->getType()) &&
"Invalid aggregate expression to emit");
assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
@@ -1177,7 +1255,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
// Optimize the slot if possible.
CheckAggExprForMemSetUse(Slot, E, *this);
- AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
+ AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
}
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
@@ -1192,7 +1270,8 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
- bool isVolatile, unsigned Alignment) {
+ bool isVolatile,
+ CharUnits alignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
if (getContext().getLangOpts().CPlusPlus) {
@@ -1225,8 +1304,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
std::pair<CharUnits, CharUnits> TypeInfo =
getContext().getTypeInfoInChars(Ty);
- if (!Alignment)
- Alignment = TypeInfo.second.getQuantity();
+ if (alignment.isZero())
+ alignment = TypeInfo.second;
// FIXME: Handle variable sized types.
@@ -1284,7 +1363,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
Builder.CreateMemCpy(DestPtr, SrcPtr,
llvm::ConstantInt::get(IntPtrTy,
TypeInfo.first.getQuantity()),
- Alignment, isVolatile);
+ alignment.getQuantity(), isVolatile);
}
void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index c69c883..7c2c9f1 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -50,36 +50,10 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
// And the rest of the call args.
EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
- return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
- FPT->getExtInfo(),
- required),
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args, MD);
}
-static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
- const Expr *E = Base;
-
- while (true) {
- E = E->IgnoreParens();
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
-
- break;
- }
-
- QualType DerivedType = E->getType();
- if (const PointerType *PTy = DerivedType->getAs<PointerType>())
- DerivedType = PTy->getPointeeType();
-
- return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
-}
-
// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
@@ -126,7 +100,7 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
// b->f();
// }
//
- const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
if (MostDerivedClassDecl->hasAttr<FinalAttr>())
return true;
@@ -166,6 +140,14 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
return false;
}
+static CXXRecordDecl *getCXXRecord(const Expr *E) {
+ QualType T = E->getType();
+ if (const PointerType *PTy = T->getAs<PointerType>())
+ T = PTy->getPointeeType();
+ const RecordType *Ty = T->castAs<RecordType>();
+ return cast<CXXRecordDecl>(Ty->getDecl());
+}
+
// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
@@ -179,7 +161,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
CGDebugInfo *DI = getDebugInfo();
- if (DI && CGM.getCodeGenOpts().LimitDebugInfo
+ if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
&& !isa<CallExpr>(ME->getBase())) {
QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
@@ -196,11 +178,45 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
// Compute the object pointer.
+ const Expr *Base = ME->getBase();
+ bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
+
+ const CXXMethodDecl *DevirtualizedMethod = NULL;
+ if (CanUseVirtualCall &&
+ canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
+ const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
+ DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
+ assert(DevirtualizedMethod);
+ const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
+ const Expr *Inner = Base->ignoreParenBaseCasts();
+ if (getCXXRecord(Inner) == DevirtualizedClass)
+ // If the class of the Inner expression is where the dynamic method
+ // is defined, build the this pointer from it.
+ Base = Inner;
+ else if (getCXXRecord(Base) != DevirtualizedClass) {
+ // If the method is defined in a class that is neither the best dynamic
+ // one nor the class of the full expression, we would have to build
+ // a derived-to-base cast to compute the correct this pointer, but
+ // we don't have support for that yet, so do a virtual call.
+ DevirtualizedMethod = NULL;
+ }
+ // If the return types are not the same, this might be a case where more
+ // code needs to run to compensate for it. For example, the derived
+ // method might return a type that inherits from the return
+ // type of MD and has it as a prefix.
+ // For now we just avoid devirtualizing these covariant cases.
+ if (DevirtualizedMethod &&
+ DevirtualizedMethod->getResultType().getCanonicalType() !=
+ MD->getResultType().getCanonicalType())
+ DevirtualizedMethod = NULL;
+ }
+
llvm::Value *This;
if (ME->isArrow())
- This = EmitScalarExpr(ME->getBase());
+ This = EmitScalarExpr(Base);
else
- This = EmitLValue(ME->getBase()).getAddress();
+ This = EmitLValue(Base).getAddress();
+
if (MD->isTrivial()) {
if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
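Illustrative C++ inputs (not from the patch) for the decisions above: the first two calls devirtualize because the dynamic type is provably the static type, while the third must stay virtual.

    struct Base { virtual int f(); };
    struct Derived final : Base { int f() override; };

    int viaFinal(Derived &d) { return d.f(); }  // 'final' class: direct call
    int viaLocal() { Base b; return b.f(); }    // complete object: direct call
    int viaPtr(Base *p) { return p->f(); }      // unknown dynamic type: virtual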
@@ -247,10 +263,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
//
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
- bool UseVirtualCall;
- UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
- && !canDevirtualizeMemberFunctionCalls(getContext(),
- ME->getBase(), MD);
+ bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
+
llvm::Value *Callee;
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
if (UseVirtualCall) {
@@ -260,8 +274,13 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
- else
+ else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ else {
+ const CXXDestructorDecl *DDtor =
+ cast<CXXDestructorDecl>(DevirtualizedMethod);
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
+ }
}
} else if (const CXXConstructorDecl *Ctor =
dyn_cast<CXXConstructorDecl>(MD)) {
@@ -273,8 +292,11 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
- else
+ else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(MD, Ty);
+ else {
+ Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
+ }
}
return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
@@ -319,10 +341,12 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Push the this ptr.
Args.add(RValue::get(This), ThisType);
+
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
ReturnValue, Args);
}
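An illustrative C++ input (not from the patch): a member function pointer's prototype omits 'this', so the arranged call counts one required argument beyond the prototype, matching RequiredArgs::forPrototypePlus(FPT, 1) above.

    struct S { int f(int); };
    int call(S &s, int (S::*pmf)(int)) {
      return (s.*pmf)(42);  // 'this' plus the one prototyped argument
    }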
@@ -409,7 +433,6 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
switch (E->getConstructionKind()) {
case CXXConstructExpr::CK_Delegating:
- assert(0 && "Delegating constructor should not need zeroing");
case CXXConstructExpr::CK_Complete:
EmitNullInitialization(Dest.getAddr(), E->getType());
break;
@@ -1006,7 +1029,7 @@ namespace {
DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1067,7 +1090,7 @@ namespace {
}
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1182,8 +1205,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// TODO: kill any unnecessary computations done for the size
// argument.
} else {
- RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
- allocatorType),
+ RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
+ allocatorType),
CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
allocatorArgs, allocator);
}
@@ -1306,7 +1329,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
- EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
+ EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
DeleteArgs, DeleteFD);
}
@@ -1462,7 +1485,7 @@ namespace {
}
// Emit the call to delete.
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), Args, OperatorDelete);
}
@@ -1510,18 +1533,7 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
}
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
-
- // Get at the argument before we performed the implicit conversion
- // to void*.
const Expr *Arg = E->getArgument();
- while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
- if (ICE->getCastKind() != CK_UserDefinedConversion &&
- ICE->getType()->isVoidPointerType())
- Arg = ICE->getSubExpr();
- else
- break;
- }
-
llvm::Value *Ptr = EmitScalarExpr(Arg);
// Null check the pointer.
@@ -1631,15 +1643,9 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// polymorphic class type, the result refers to a std::type_info object
// representing the type of the most derived object (that is, the dynamic
// type) to which the glvalue refers.
- if (E->getExprOperand()->isGLValue()) {
- if (const RecordType *RT =
- E->getExprOperand()->getType()->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->isPolymorphic())
- return EmitTypeidFromVTable(*this, E->getExprOperand(),
- StdTypeInfoPtrTy);
- }
- }
+ if (E->isPotentiallyEvaluated())
+ return EmitTypeidFromVTable(*this, E->getExprOperand(),
+ StdTypeInfoPtrTy);
QualType OperandTy = E->getExprOperand()->getType();
return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
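Illustrative C++ inputs (not from the patch) separating the two typeid paths: only a potentially evaluated operand, i.e. a glvalue of polymorphic class type, needs the vtable read; any other operand folds to the static std::type_info object.

    #include <typeinfo>
    struct Poly { virtual ~Poly(); };
    const std::type_info &dynamicCase(Poly &p) { return typeid(p); }  // vtable
    const std::type_info &staticCase(int i)    { return typeid(i); }  // constant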
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index bc9f9ef..a17a436 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -386,11 +386,11 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
--FieldNo;
continue;
}
- LastFD = (*Field);
+ LastFD = *Field;
}
// If this is a union, skip all the fields that aren't being initialized.
@@ -399,7 +399,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
// Don't emit anonymous bitfields, they just affect layout.
if (Field->isUnnamedBitfield()) {
- LastFD = (*Field);
+ LastFD = *Field;
continue;
}
@@ -486,11 +486,11 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
--FieldNo;
continue;
}
- LastFD = (*Field);
+ LastFD = *Field;
}
// If this is a union, skip all the fields that aren't being initialized.
@@ -499,7 +499,7 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
// Don't emit anonymous bitfields, they just affect layout.
if (Field->isUnnamedBitfield()) {
- LastFD = (*Field);
+ LastFD = *Field;
continue;
}
@@ -932,7 +932,8 @@ public:
C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", 0, false,
+ C, ".compoundliteral", 0,
+ llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(E->getType()));
return C;
}
@@ -1300,7 +1301,8 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
if (CGM.getTypes().isZeroInitializable(BaseDecl))
continue;
- uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
+ uint64_t BaseOffset =
+ CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
FillInNullDataMemberPointers(CGM, I->getType(),
Elements, StartOffset + BaseOffset);
}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 18891f7..1cccafe 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -498,8 +498,8 @@ public:
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
return CGF.EmitObjCStringLiteral(E);
}
- Value *VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
- return CGF.EmitObjCNumericLiteral(E);
+ Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
+ return CGF.EmitObjCBoxedExpr(E);
}
Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
return CGF.EmitObjCArrayLiteral(E);
@@ -798,14 +798,15 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
return Builder.getInt(Value);
}
- // Emit debug info for aggregate now, if it was delayed to reduce
+ // Emit debug info for aggregate now, if it was delayed to reduce
// debug info size.
CGDebugInfo *DI = CGF.getDebugInfo();
- if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
+ if (DI &&
+ CGF.CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo) {
QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
- DI->getOrCreateRecordType(PTy->getPointeeType(),
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
M->getParent()->getLocation());
}
return EmitLoadOfLValue(E);
@@ -1520,7 +1521,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// FIXME: It would be nice if we didn't have to loop here!
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end();
- Field != FieldEnd; (void)++Field, ++i) {
+ Field != FieldEnd; ++Field, ++i) {
if (*Field == MemberDecl)
break;
}
@@ -1554,9 +1555,8 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Compute the offset to the base.
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
- int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
- CGF.getContext().getCharWidth();
- Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+ CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
}
}
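An illustrative C++ input (not from the patch; offsetof on a non-standard-layout class is accepted by Clang with a warning) where the base-class component above fires: resolving 'a' within B takes a derived-to-base step whose offset is now read in CharUnits.

    struct A { int a; };
    struct B : A { int b; };
    unsigned offOfA() { return __builtin_offsetof(B, a); }  // base-class step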
@@ -1682,11 +1682,9 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS());
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
- OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
- E->getComputationLHSType());
llvm::PHINode *atomicPHI = 0;
- if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ if (LHSTy->isAtomicType()) {
// FIXME: For floating point types, we should be saving and restoring the
// floating point environment in the loop.
llvm::BasicBlock *startBB = Builder.GetInsertBlock();
@@ -1695,10 +1693,12 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
Builder.SetInsertPoint(opBB);
atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
atomicPHI->addIncoming(OpInfo.LHS, startBB);
- OpInfo.Ty = atomicTy->getValueType();
OpInfo.LHS = atomicPHI;
}
-
+
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
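An illustrative input for the reordering above (_Atomic is C11; Clang also accepts it in C++ as an extension): the loaded atomic value feeds the PHI of a compare-exchange loop, so converting it to the computation type must happen after the PHI, inside the loop.

    _Atomic(short) counter;
    void bump() {
      counter += 1;  // loaded short is widened to int after the PHI, then stored back
    }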
@@ -2592,7 +2592,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *LHSTmp = LHS;
bool wasCast = false;
llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
- if (rhsVTy->getElementType()->isFloatTy()) {
+ if (rhsVTy->getElementType()->isFloatingPointTy()) {
RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
wasCast = true;
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index d0aa0f5..4ac172d 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -30,7 +30,7 @@ typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
- const Expr *E,
+ QualType ET,
const ObjCMethodDecl *Method,
RValue Result);
@@ -51,36 +51,36 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
-/// EmitObjCNumericLiteral - This routine generates code for
-/// the appropriate +[NSNumber numberWith<Type>:] method.
+/// EmitObjCBoxedExpr - This routine generates code to call
+/// the appropriate expression boxing method. This will be either
+/// +[NSNumber numberWith<Type>:] or +[NSString stringWithUTF8String:].
///
llvm::Value *
-CodeGenFunction::EmitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Generate the correct selector for this literal's concrete type.
- const Expr *NL = E->getNumber();
+ const Expr *SubExpr = E->getSubExpr();
// Get the method.
- const ObjCMethodDecl *Method = E->getObjCNumericLiteralMethod();
- assert(Method && "NSNumber method is null");
- Selector Sel = Method->getSelector();
+ const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
+ assert(BoxingMethod && "BoxingMethod is null");
+ assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
+ Selector Sel = BoxingMethod->getSelector();
// Generate a reference to the class pointer, which will be the receiver.
- QualType ResultType = E->getType(); // should be NSNumber *
- const ObjCObjectPointerType *InterfacePointerType =
- ResultType->getAsObjCInterfacePointerType();
- ObjCInterfaceDecl *NSNumberDecl =
- InterfacePointerType->getObjectType()->getInterface();
+ // Assumes that the method was introduced in the class that should be
+ // messaged (avoids pulling it out of the result type).
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
- llvm::Value *Receiver = Runtime.GetClass(Builder, NSNumberDecl);
-
- const ParmVarDecl *argDecl = *Method->param_begin();
+ const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, ClassDecl);
+
+ const ParmVarDecl *argDecl = *BoxingMethod->param_begin();
QualType ArgQT = argDecl->getType().getUnqualifiedType();
- RValue RV = EmitAnyExpr(NL);
+ RValue RV = EmitAnyExpr(SubExpr);
CallArgList Args;
Args.add(RV, ArgQT);
-
+
RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
- ResultType, Sel, Receiver, Args,
- NSNumberDecl, Method);
+ BoxingMethod->getResultType(), Sel, Receiver, Args,
+ ClassDecl, BoxingMethod);
return Builder.CreateBitCast(result.getScalarVal(),
ConvertType(E->getType()));
}
@@ -202,20 +202,20 @@ llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
/// \brief Adjust the type of the result of an Objective-C message send
/// expression when the method has a related result type.
static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
- const Expr *E,
+ QualType ExpT,
const ObjCMethodDecl *Method,
RValue Result) {
if (!Method)
return Result;
if (!Method->hasRelatedResultType() ||
- CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
+ CGF.getContext().hasSameType(ExpT, Method->getResultType()) ||
!Result.isScalar())
return Result;
// We have applied a related result type. Cast the rvalue appropriately.
return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
- CGF.ConvertType(E->getType())));
+ CGF.ConvertType(ExpT)));
}
/// Decide whether to extend the lifetime of the receiver of a
@@ -401,7 +401,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Builder.CreateStore(newSelf, selfAddr);
}
- return AdjustRelatedResultType(*this, E, method, result);
+ return AdjustRelatedResultType(*this, E->getType(), method, result);
}
namespace {
@@ -507,9 +507,9 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Context.VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
fn, ReturnValueSlot(), args);
}
@@ -580,7 +580,7 @@ namespace {
};
}
-/// Pick an implementation strategy for the the given property synthesis.
+/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
const ObjCPropertyImplDecl *propImpl) {
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
@@ -698,8 +698,9 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
Kind = Native;
}
-/// GenerateObjCGetter - Generate an Objective-C property getter
-/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// \brief Generate an Objective-C property getter function.
+///
+/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
@@ -710,7 +711,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
assert(OMD && "Invalid call to generate getter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
- generateObjCGetterBody(IMP, PID, AtomicHelperFn);
+ generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
FinishFunction();
}
@@ -763,15 +764,17 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
llvm::Value *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
}
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
+ const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn) {
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
@@ -850,16 +853,16 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(getTypes().arrangeFunctionCall(propType, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ RValue RV = EmitCall(getTypes().arrangeFreeFunctionCall(propType, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
getPropertyFn, ReturnValueSlot(), args);
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- getTypes().ConvertType(propType)));
+ getTypes().ConvertType(getterMethod->getResultType())));
EmitReturnOfRValue(RV, propType);
@@ -905,6 +908,8 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
value = Builder.CreateBitCast(value, ConvertType(propType));
+ value = Builder.CreateBitCast(value,
+ ConvertType(GetterMethodDecl->getResultType()));
}
EmitReturnOfRValue(RValue::get(value), propType);
@@ -952,9 +957,10 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyStructFn, ReturnValueSlot(), args);
}
@@ -989,9 +995,10 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
llvm::Value *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
- CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
+ args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
@@ -1125,9 +1132,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
if (setOptimizedPropertyFn) {
args.add(RValue::get(arg), getContext().getObjCIdType());
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
- EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
setOptimizedPropertyFn, ReturnValueSlot(), args);
} else {
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
@@ -1138,9 +1145,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
- EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
setPropertyFn, ReturnValueSlot(), args);
}
@@ -1206,8 +1213,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
EmitStmt(&assign);
}
-/// GenerateObjCSetter - Generate an Objective-C property setter
-/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// \brief Generate an Objective-C property setter function.
+///
+/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
@@ -1502,9 +1510,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
- EmitCall(CGM.getTypes().arrangeFunctionCall(getContext().VoidTy, Args2,
- FunctionType::ExtInfo(),
- RequiredArgs::All),
+ EmitCall(CGM.getTypes().arrangeFreeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
EnumerationMutationFn, ReturnValueSlot(), Args2);
// Otherwise, or if the mutation function returns, just continue.
@@ -1685,11 +1693,16 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
StringRef fnName) {
llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
- // In -fobjc-no-arc-runtime, emit weak references to the runtime
- // support library.
- if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ // If the target runtime doesn't naturally support ARC, emit weak
+ // references to the runtime support library. We don't really
+ // permit this to fail, but we need a particular relocation style.
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
+ if (!CGM.getLangOpts().ObjCRuntime.hasARC())
f->setLinkage(llvm::Function::ExternalWeakLinkage);
+ // Set the nonlazybind attribute on these APIs for performance.
+ if (fnName == "objc_retain" || fnName == "objc_release")
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
+ }
return fn;
}
@@ -1808,8 +1821,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
}
/// Produce the code to do a retain. Based on the type, calls one of:
-/// call i8* @objc_retain(i8* %value)
-/// call i8* @objc_retainBlock(i8* %value)
+/// call i8* \@objc_retain(i8* %value)
+/// call i8* \@objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
if (type->isBlockPointerType())
return EmitARCRetainBlock(value, /*mandatory*/ false);
@@ -1818,7 +1831,7 @@ llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
}
/// Retain the given object, with normal retain semantics.
-/// call i8* @objc_retain(i8* %value)
+/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retain,
@@ -1826,7 +1839,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
}
/// Retain the given block, with _Block_copy semantics.
-/// call i8* @objc_retainBlock(i8* %value)
+/// call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
@@ -1856,7 +1869,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
}
/// Retain the given object which is the result of a function call.
-/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
@@ -1906,7 +1919,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
}
/// Release the given object.
-/// call void @objc_release(i8* %value)
+/// call void \@objc_release(i8* %value)
void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
@@ -1933,7 +1946,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
}
/// Store into a strong object. Always calls this:
-/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
llvm::Value *value,
bool ignored) {
@@ -1958,7 +1971,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
}
/// Store into a strong object. Sometimes calls this:
-/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
llvm::Value *newValue,
@@ -1994,7 +2007,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
}
/// Autorelease the given object.
-/// call i8* @objc_autorelease(i8* %value)
+/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_autorelease,
@@ -2002,7 +2015,7 @@ llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
}
/// Autorelease the given object.
-/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
@@ -2011,7 +2024,7 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
}
/// Do a fused retain/autorelease of the given object.
-/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
@@ -2020,10 +2033,10 @@ CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
}
/// Do a fused retain/autorelease of the given object.
-/// call i8* @objc_retainAutorelease(i8* %value)
+/// call i8* \@objc_retainAutorelease(i8* %value)
/// or
-/// %retain = call i8* @objc_retainBlock(i8* %value)
-/// call i8* @objc_autorelease(i8* %retain)
+/// %retain = call i8* \@objc_retainBlock(i8* %value)
+/// call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
llvm::Value *value) {
if (!type->isBlockPointerType())
@@ -2039,7 +2052,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
}
/// Do a fused retain/autorelease of the given object.
-/// call i8* @objc_retainAutorelease(i8* %value)
+/// call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
@@ -2047,7 +2060,7 @@ CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
"objc_retainAutorelease");
}
-/// i8* @objc_loadWeak(i8** %addr)
+/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
return emitARCLoadOperation(*this, addr,
@@ -2055,14 +2068,14 @@ llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
"objc_loadWeak");
}
-/// i8* @objc_loadWeakRetained(i8** %addr)
+/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
return emitARCLoadOperation(*this, addr,
CGM.getARCEntrypoints().objc_loadWeakRetained,
"objc_loadWeakRetained");
}
-/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
llvm::Value *value,
@@ -2072,7 +2085,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
"objc_storeWeak", ignored);
}
-/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
/// *addr = nil; objc_storeWeak(addr, value);
@@ -2092,7 +2105,7 @@ void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
"objc_initWeak", /*ignored*/ true);
}
-/// void @objc_destroyWeak(i8** %addr)
+/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
@@ -2110,7 +2123,7 @@ void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
call->setDoesNotThrow();
}
-/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
@@ -2119,7 +2132,7 @@ void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
"objc_moveWeak");
}
-/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
@@ -2129,7 +2142,7 @@ void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
}
/// Produce the code to do a objc_autoreleasepool_push.
-/// call i8* @objc_autoreleasePoolPush(void)
+/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
if (!fn) {
@@ -2145,7 +2158,7 @@ llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
}
/// Produce the code to do a primitive release.
-/// call void @objc_autoreleasePoolPop(i8* %ptr)
+/// call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
assert(value->getType() == Int8PtrTy);
@@ -2717,7 +2730,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
- if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
+ if (CGM.getLangOpts().ObjCRuntime.hasARC()) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
} else {
@@ -2749,6 +2762,11 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
Builder.CreateCall(extender, object)->setDoesNotThrow();
}
+static bool hasAtomicCopyHelperAPI(const ObjCRuntime &runtime) {
+ // For now, only NeXT has these APIs.
+ return runtime.isNeXTFamily();
+}
+
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a c++ object type with
/// non-trivial copy assignment function, produce following helper function.
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
@@ -2757,7 +2775,8 @@ llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
// FIXME. This API is for the NeXT runtime only for now.
- if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ if (!getLangOpts().CPlusPlus ||
+ !hasAtomicCopyHelperAPI(getLangOpts().ObjCRuntime))
return 0;
QualType Ty = PID->getPropertyIvarDecl()->getType();
if (!Ty->isRecordType())
@@ -2841,7 +2860,8 @@ llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
// FIXME. This API is for the NeXT runtime only for now.
- if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ if (!getLangOpts().CPlusPlus ||
+ !hasAtomicCopyHelperAPI(getLangOpts().ObjCRuntime))
return 0;
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
QualType Ty = PD->getType();
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index db0bd95..6d129d0 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -99,8 +99,8 @@ class LazyRuntimeFunction {
/// GNU Objective-C runtime code generation. This class implements the parts of
-/// Objective-C support that are specific to the GNU family of runtimes (GCC and
-/// GNUstep).
+/// Objective-C support that are specific to the GNU family of runtimes (GCC,
+/// GNUstep and ObjFW).
class CGObjCGNU : public CGObjCRuntime {
protected:
/// The LLVM module into which output is inserted
@@ -292,8 +292,8 @@ private:
protected:
/// Function used for throwing Objective-C exceptions.
LazyRuntimeFunction ExceptionThrowFn;
- /// Function used for rethrowing exceptions, used at the end of @finally or
- /// @synchronize blocks.
+ /// Function used for rethrowing exceptions, used at the end of \@finally or
+ /// \@synchronize blocks.
LazyRuntimeFunction ExceptionReThrowFn;
/// Function called when entering a catch function. This is required for
/// differentiating Objective-C exceptions and foreign exceptions.
@@ -301,9 +301,9 @@ protected:
/// Function called when exiting from a catch block. Used to do exception
/// cleanup.
LazyRuntimeFunction ExitCatchFn;
- /// Function called when entering an @synchronize block. Acquires the lock.
+ /// Function called when entering an \@synchronize block. Acquires the lock.
LazyRuntimeFunction SyncEnterFn;
- /// Function called when exiting an @synchronize block. Releases the lock.
+ /// Function called when exiting an \@synchronize block. Releases the lock.
LazyRuntimeFunction SyncExitFn;
private:
@@ -350,7 +350,7 @@ private:
ArrayRef<Selector> MethodSels,
ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList);
- /// Emits an empty protocol. This is used for @protocol() where no protocol
+ /// Emits an empty protocol. This is used for \@protocol() where no protocol
/// is found. The runtime will (hopefully) fix up the pointer to refer to the
/// real protocol.
llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
@@ -397,11 +397,11 @@ private:
const ObjCIvarDecl *Ivar);
/// Emits a reference to a class. This allows the linker to object if there
/// is no class of the matching name.
+protected:
void EmitClassRef(const std::string &className);
/// Emits a pointer to the named class
- llvm::Value *GetClassNamed(CGBuilderTy &Builder, const std::string &Name,
- bool isWeak);
-protected:
+ virtual llvm::Value *GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name, bool isWeak);
/// Looks up the method for sending a message to the specified object. This
/// mechanism differs between the GCC and GNU runtimes, so this method must be
/// overridden in subclasses.
@@ -653,6 +653,33 @@ class CGObjCGNUstep : public CGObjCGNU {
}
};
+/// The ObjFW runtime, which closely follows the GCC runtime's
+/// compiler ABI. Support here is due to Jonathan Schleifer, the
+/// ObjFW maintainer.
+class CGObjCObjFW : public CGObjCGCC {
+ /// Emit class references unconditionally as direct symbol references.
+ virtual llvm::Value *GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name, bool isWeak) {
+ if (isWeak)
+ return CGObjCGNU::GetClassNamed(Builder, Name, isWeak);
+
+ EmitClassRef(Name);
+
+ std::string SymbolName = "_OBJC_CLASS_" + Name;
+
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(SymbolName);
+
+ if (!ClassSymbol)
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, SymbolName);
+
+ return ClassSymbol;
+ }
+
+public:
+ CGObjCObjFW(CodeGenModule &Mod): CGObjCGCC(Mod) {}
+};
} // end anonymous namespace
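// A sketch of the effect (class name invented): for a non-weak class
// reference, CGObjCObjFW::GetClassNamed above emits a direct reference to an
// external symbol instead of a runtime lookup call, roughly:
//
//   @_OBJC_CLASS_Example = external global i64  ; LongTy; i32 on 32-bit targets
//
// Weak references still fall back to the inherited CGObjCGNU lookup path.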
@@ -889,7 +916,7 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
// foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
// a pointer indicating object catchalls, and NULL to indicate real
// catchalls
- if (CGM.getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
return MakeConstantString("@id");
} else {
return 0;
@@ -1627,7 +1654,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
iter = PD->prop_begin(), endIter = PD->prop_end();
iter != endIter ; iter++) {
std::vector<llvm::Constant*> Fields;
- ObjCPropertyDecl *property = (*iter);
+ ObjCPropertyDecl *property = *iter;
Fields.push_back(MakeConstantString(property->getNameAsString()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
@@ -1877,7 +1904,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
iter != endIter ; iter++) {
std::vector<llvm::Constant*> Fields;
- ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyDecl *property = iter->getPropertyDecl();
ObjCPropertyImplDecl *propertyImpl = *iter;
bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Synthesize);
@@ -1984,7 +2011,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
- if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
@@ -1999,7 +2026,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
- if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCRuntime.isNonFragile()) {
Offset = BaseOffset - superInstanceSize;
}
llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
@@ -2486,25 +2513,8 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
-
- // Note: This may have to be an invoke, if we want to support constructs like:
- // @try {
- // @throw(obj);
- // }
- // @catch(id) ...
- //
- // This is effectively turning @throw into an incredibly-expensive goto, but
- // it may happen as a result of inlining followed by missed optimizations, or
- // as a result of stupidity.
- llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
- if (!UnwindBB) {
- CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
- CGF.Builder.CreateUnreachable();
- } else {
- CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB,
- ExceptionAsObject);
- }
- // Clear the insertion point to indicate we are in unreachable code.
+ CGF.EmitCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
CGF.Builder.ClearInsertionPoint();
}
@@ -2640,7 +2650,7 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
- if (CGM.getLangOpts().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
if (RuntimeVersion < 10)
return CGF.Builder.CreateZExtOrBitCast(
@@ -2665,7 +2675,20 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
CGObjCRuntime *
clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
- if (CGM.getLangOpts().ObjCNonFragileABI)
+ switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ case ObjCRuntime::GNUstep:
return new CGObjCGNUstep(CGM);
- return new CGObjCGCC(CGM);
+
+ case ObjCRuntime::GCC:
+ return new CGObjCGCC(CGM);
+
+ case ObjCRuntime::ObjFW:
+ return new CGObjCObjFW(CGM);
+
+ case ObjCRuntime::FragileMacOSX:
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ llvm_unreachable("these runtimes are not GNU runtimes");
+ }
+ llvm_unreachable("bad runtime");
}
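// A sketch of how each kind is selected (the -fobjc-runtime= spellings below
// are an assumption; verify them against the driver of this era):
//
//   -fobjc-runtime=gcc      -> ObjCRuntime::GCC      -> CGObjCGCC
//   -fobjc-runtime=gnustep  -> ObjCRuntime::GNUstep  -> CGObjCGNUstep
//   -fobjc-runtime=objfw    -> ObjCRuntime::ObjFW    -> CGObjCObjFW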
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index e5246f1..ef802a3 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -241,9 +241,9 @@ public:
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(IdType, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(IdType, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -261,9 +261,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
@@ -287,9 +287,9 @@ public:
Params.push_back(IdType);
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
const char *name;
if (atomic && copy)
name = "objc_setProperty_atomic_copy";
@@ -314,9 +314,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
}
@@ -333,9 +333,9 @@ public:
Params.push_back(Ctx.VoidPtrTy);
Params.push_back(Ctx.VoidPtrTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
}
@@ -346,7 +346,7 @@ public:
SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo(),
RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
@@ -2515,7 +2515,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
Values);
std::string Name("\01L_OBJC_METACLASS_");
- Name += ID->getNameAsCString();
+ Name += ID->getName();
// Check for a forward reference.
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
@@ -3612,7 +3612,8 @@ enum ImageInfoFlags {
// A flag indicating that the module has no instances of a @synthesize of a
// superclass variable. <rdar://problem/6803242>
- eImageInfo_CorrectedSynthesize = (1 << 4)
+ eImageInfo_CorrectedSynthesize = (1 << 4),
+ eImageInfo_ImageIsSimulated = (1 << 5)
};
void CGObjCCommonMac::EmitImageInfo() {
@@ -3657,6 +3658,14 @@ void CGObjCCommonMac::EmitImageInfo() {
llvm::MDNode::get(VMContext, Ops));
}
}
+
+ // Indicate whether we're compiling this to run on a simulator.
+ const llvm::Triple &Triple = CGM.getTarget().getTriple();
+ if (Triple.getOS() == llvm::Triple::IOS &&
+ (Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::x86_64))
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Is Simulated",
+ eImageInfo_ImageIsSimulated);
}
// struct objc_module {
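// A sketch of the resulting module flag for an i386/x86_64 iOS (simulator)
// triple, in the IR metadata syntax of this LLVM era (the node number is
// illustrative; 32 == eImageInfo_ImageIsSimulated, 1 == llvm::Module::Error):
//
//   !llvm.module.flags = !{..., !N}
//   !N = metadata !{i32 1, metadata !"Objective-C Is Simulated", i32 32}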
@@ -3809,7 +3818,10 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
bool &HasUnion) {
const RecordDecl *RD = RT->getDecl();
// FIXME - Use iterator.
- SmallVector<const FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ SmallVector<const FieldDecl*, 16> Fields;
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i)
+ Fields.push_back(*i);
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
@@ -4374,9 +4386,10 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_objc_super"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCIdType(), 0, 0, false, false));
+ Ctx.getObjCIdType(), 0, 0, false, ICIS_NoInit));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCClassType(), 0, 0, false, false));
+ Ctx.getObjCClassType(), 0, 0, false,
+ ICIS_NoInit));
RD->completeDefinition();
SuperCTy = Ctx.getTagDeclType(RD);
@@ -4755,9 +4768,10 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
SourceLocation(), SourceLocation(),
&Ctx.Idents.get("_message_ref_t"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.VoidPtrTy, 0, 0, false, false));
+ Ctx.VoidPtrTy, 0, 0, false, ICIS_NoInit));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
- Ctx.getObjCSelType(), 0, 0, false, false));
+ Ctx.getObjCSelType(), 0, 0, false,
+ ICIS_NoInit));
RD->completeDefinition();
MessageRefCTy = Ctx.getTagDeclType(RD);
@@ -6367,7 +6381,18 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
CodeGen::CGObjCRuntime *
CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
- if (CGM.getLangOpts().ObjCNonFragileABI)
- return new CGObjCNonFragileABIMac(CGM);
+ switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ case ObjCRuntime::FragileMacOSX:
return new CGObjCMac(CGM);
+
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
+ return new CGObjCNonFragileABIMac(CGM);
+
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
+ llvm_unreachable("these runtimes are not Mac runtimes");
+ }
+ llvm_unreachable("bad runtime");
}
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 9370096..9aa6837 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -120,6 +120,8 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
uint64_t ContainingTypeAlign = CGF.CGM.getContext().getTargetInfo().getCharAlign();
uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
+ CharUnits ContainingTypeAlignCharUnits =
+ CGF.CGM.getContext().toCharUnitsFromBits(ContainingTypeAlign);
// Allocate a new CGBitFieldInfo object to describe this access.
//
@@ -132,7 +134,8 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
ContainingTypeSize, ContainingTypeAlign));
return LValue::MakeBitfield(V, *Info,
- IvarTy.withCVRQualifiers(CVRQualifiers));
+ IvarTy.withCVRQualifiers(CVRQualifiers),
+ ContainingTypeAlignCharUnits);
}
namespace {
@@ -334,7 +337,7 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
///
/// \param method - may be null
/// \param resultType - the result type to use if there's no method
-/// \param argInfo - the actual arguments, including implicit ones
+/// \param callArgs - the actual arguments, including implicit ones
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
@@ -355,17 +358,17 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
// Otherwise, there is.
FunctionType::ExtInfo einfo = signature.getExtInfo();
const CGFunctionInfo &argsInfo =
- CGM.getTypes().arrangeFunctionCall(resultType, callArgs, einfo,
- signature.getRequiredArgs());
+ CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
return MessageSendInfo(argsInfo, signatureType);
}
// There's no method; just use a default CC.
const CGFunctionInfo &argsInfo =
- CGM.getTypes().arrangeFunctionCall(resultType, callArgs,
- FunctionType::ExtInfo(),
- RequiredArgs::All);
+ CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
// Derive the signature to call from that.
llvm::PointerType *signatureType =
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index ccf4d4d..219a3e4 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -91,20 +91,20 @@ protected:
llvm::Value *Offset);
/// Emits a try / catch statement. This function is intended to be called by
/// subclasses, and provides a generic mechanism for generating these, which
- /// should be usable by all runtimes. The caller must provide the functions to
- /// call when entering and exiting a @catch() block, and the function used to
- /// rethrow exceptions. If the begin and end catch functions are NULL, then
- /// the function assumes that the EH personality function provides the
- /// thrown object directly.
+ /// should be usable by all runtimes. The caller must provide the functions
+ /// to call when entering and exiting a \@catch() block, and the function
+ /// used to rethrow exceptions. If the begin and end catch functions are
+ /// NULL, then the function assumes that the EH personality function provides
+ /// the thrown object directly.
void EmitTryCatchStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S,
llvm::Constant *beginCatchFn,
llvm::Constant *endCatchFn,
llvm::Constant *exceptionRethrowFn);
- /// Emits an @synchronize() statement, using the syncEnterFn and syncExitFn
- /// arguments as the functions called to lock and unlock the object. This
- /// function can be called by subclasses that use zero-cost exception
- /// handling.
+  /// Emits an \@synchronized() statement, using the \p syncEnterFn and
+ /// \p syncExitFn arguments as the functions called to lock and unlock
+ /// the object. This function can be called by subclasses that use
+ /// zero-cost exception handling.
void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S,
llvm::Function *syncEnterFn,
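// A sketch of the lowering this entry point provides (runtime-neutral; the
// enter/exit functions are whatever the subclass passes in, e.g. the
// objc_sync_enter/objc_sync_exit pair):
//
//   @synchronized (obj) { body }
//     =>   syncEnterFn(obj);
//          @try { body } @finally { syncExitFn(obj); }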
@@ -179,7 +179,7 @@ public:
const ObjCMethodDecl *Method = 0) = 0;
/// Emit the code to return the named protocol as an object, as in a
- /// @protocol expression.
+ /// \@protocol expression.
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *OPD) = 0;
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 19973b4..d1b370a 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -985,7 +985,8 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
if (!ForEH && !getContext().getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
- if (ForEH && Ty->isObjCObjectPointerType() && !LangOpts.NeXTRuntime)
+ if (ForEH && Ty->isObjCObjectPointerType() &&
+ LangOpts.ObjCRuntime.isGNUFamily())
return ObjCRuntime->GetEHType(Ty);
return RTTIBuilder(*this).BuildTypeInfo(Ty);
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 25a0a50..94c822f 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -64,12 +64,7 @@ public:
/// Bit width of the memory access to perform.
unsigned AccessWidth;
- /// The alignment of the memory access, or 0 if the default alignment should
- /// be used.
- //
- // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
- // thing when it generates the code, if avoiding align directives is
- // desired.
+ /// The alignment of the memory access, assuming the parent is aligned.
CharUnits AccessAlignment;
/// Offset for the target value.
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 1193e97..d642ef8 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -235,6 +235,8 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize,
uint64_t ContainingTypeSizeInBits,
unsigned ContainingTypeAlign) {
+ assert(ContainingTypeAlign && "Expected alignment to be specified");
+
llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
@@ -714,14 +716,18 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
}
// Otherwise, add a vtable / vf-table if the layout says to do so.
- } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
- ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
- : RD->isDynamicClass()) {
+ } else if (Layout.hasOwnVFPtr()) {
llvm::Type *FunctionType =
llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
/*isVarArg=*/true);
llvm::Type *VTableTy = FunctionType->getPointerTo();
-
+
+ if (getTypeAlignment(VTableTy) > Alignment) {
+ // FIXME: Should we allow this to happen in Sema?
+ assert(!Packed && "Alignment is wrong even with packed struct!");
+ return false;
+ }
+
assert(NextFieldOffset.isZero() &&
"VTable pointer must come first!");
AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
@@ -814,7 +820,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are
// ignored:
- const FieldDecl *FD = (*Field);
+ const FieldDecl *FD = *Field;
if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
--FieldNo;
continue;
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index a1d0789..467c779 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -133,6 +133,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+ case Stmt::MSAsmStmtClass: EmitMSAsmStmt(cast<MSAsmStmt>(*S)); break;
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
@@ -155,7 +156,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::ObjCAutoreleasePoolStmtClass:
EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
break;
-
+
case Stmt::CXXTryStmtClass:
EmitCXXTryStmt(cast<CXXTryStmt>(*S));
break;
@@ -360,15 +361,14 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
Int8PtrTy, "addr");
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
-
// Get the basic block for the indirect goto.
llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
-
+
// The first instruction in the block has to be the PHI for the switch dest,
// add an entry for this branch.
cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
-
+
EmitBranch(IndGotoBB);
}
@@ -462,12 +462,12 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
if (S.getConditionVariable())
EmitAutoVarDecl(*S.getConditionVariable());
-
+
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
// execution of the loop body.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
-
+
// while(1) is common, avoid extra exit blocks. Be sure
// to correctly handle break/continue though.
bool EmitBoolCondBranch = true;
@@ -489,7 +489,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
EmitBranchThroughCleanup(LoopExit);
}
}
-
+
// Emit the loop body. We have to emit this in a cleanup scope
// because it might be a singleton DeclStmt.
{
@@ -584,7 +584,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// Create a cleanup scope for the condition variable cleanups.
RunCleanupsScope ConditionScope(*this);
-
+
llvm::Value *BoolCondVal = 0;
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
@@ -598,7 +598,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// create a block to stage a loop exit along.
if (ForScope.requiresCleanups())
ExitBlock = createBasicBlock("for.cond.cleanup");
-
+
// As long as the condition is true, iterate the loop.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
@@ -679,7 +679,7 @@ void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (ForScope.requiresCleanups())
ExitBlock = createBasicBlock("for.cond.cleanup");
-
+
// The loop body, consisting of the specified body and the loop variable.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
@@ -750,7 +750,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Apply the named return value optimization for this return statement,
// which means doing nothing: the appropriate result has already been
// constructed into the NRVO variable.
-
+
    // If there is an NRVO flag for this variable, set it to 1 to indicate
// that the cleanup code should not destroy the variable.
if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
@@ -901,7 +901,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// try to not emit an empty block.
if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
JumpDest Block = BreakContinueStack.back().BreakBlock;
-
+
// Only do this optimization if there are no cleanups that need emitting.
if (isObviouslyBranchWithoutCleanups(Block)) {
SwitchInsn->addCase(CaseVal, Block.getBlock());
@@ -915,7 +915,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
return;
}
}
-
+
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
SwitchInsn->addCase(CaseVal, CaseDest);
@@ -984,7 +984,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// If this is a null statement, just succeed.
if (S == 0)
return Case ? CSFC_Success : CSFC_FallThrough;
-
+
// If this is the switchcase (case 4: or default) that we're looking for, then
// we're in business. Just add the substatement.
if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
@@ -993,7 +993,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
ResultStmts);
}
-
+
// Otherwise, this is some other case or default statement, just ignore it.
return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
ResultStmts);
@@ -1003,7 +1003,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// return a success!
if (Case == 0 && isa<BreakStmt>(S))
return CSFC_Success;
-
+
// If this is a switch statement, then it might contain the SwitchCase, the
// break, or neither.
if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
@@ -1015,12 +1015,12 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// using the declaration even if it is skipped, so we can't optimize out
// the decl if the kept statements might refer to it.
bool HadSkippedDecl = false;
-
+
// If we're looking for the case, just see if we can skip each of the
// substatements.
for (; Case && I != E; ++I) {
HadSkippedDecl |= isa<DeclStmt>(*I);
-
+
switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
case CSFC_Failure: return CSFC_Failure;
case CSFC_Success:
@@ -1033,7 +1033,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
// optimization.
if (HadSkippedDecl)
return CSFC_Failure;
-
+
for (++I; I != E; ++I)
if (CodeGenFunction::ContainsLabel(*I, true))
return CSFC_Failure;
@@ -1047,7 +1047,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
assert(FoundCase && "Didn't find case but returned fallthrough?");
// We recursively found Case, so we're not looking for it anymore.
Case = 0;
-
+
// If we found the case and skipped declarations, we can't do the
// optimization.
if (HadSkippedDecl)
@@ -1074,9 +1074,9 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
if (CodeGenFunction::ContainsLabel(*I, true))
return CSFC_Failure;
return CSFC_Success;
- }
+ }
}
-
+
return Case ? CSFC_Success : CSFC_FallThrough;
}
@@ -1088,11 +1088,11 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
return CSFC_Failure;
return CSFC_Success;
}
-
+
// Otherwise, we want to include this statement. Everything is cool with that
// so long as it doesn't contain a break out of the switch we're in.
if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
-
+
// Otherwise, everything is great. Include the statement and tell the caller
// that we fall through and include the next statement as well.
ResultStmts.push_back(S);
@@ -1104,14 +1104,14 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
- const llvm::APInt &ConstantCondValue,
+ const llvm::APSInt &ConstantCondValue,
SmallVectorImpl<const Stmt*> &ResultStmts,
ASTContext &C) {
// First step, find the switch case that is being branched to. We can do this
// efficiently by scanning the SwitchCase list.
const SwitchCase *Case = S.getSwitchCaseList();
const DefaultStmt *DefaultCase = 0;
-
+
for (; Case; Case = Case->getNextSwitchCase()) {
// It's either a default or case. Just remember the default statement in
// case we're not jumping to any numbered cases.
@@ -1119,17 +1119,17 @@ static bool FindCaseStatementsForValue(const SwitchStmt &S,
DefaultCase = DS;
continue;
}
-
+
// Check to see if this case is the one we're looking for.
const CaseStmt *CS = cast<CaseStmt>(Case);
// Don't handle case ranges yet.
if (CS->getRHS()) return false;
-
+
// If we found our case, remember it as 'case'.
if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
break;
}
-
+
// If we didn't find a matching case, we use a default if it exists, or we
// elide the whole switch body!
if (Case == 0) {
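// A sketch of why the folded condition is an APSInt rather than an APInt
// (example invented): EvaluateKnownConstInt() yields llvm::APSInt, and the
// comparison against the folded condition is only meaningful when both sides
// agree on signedness, e.g.:
//
//   switch ((signed char)c) {
//   case -1: ...   // must match -1, and must not be conflated with 255
//   }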
@@ -1168,7 +1168,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// See if we can constant fold the condition of the switch and therefore only
// emit the live case statement (if any) of the switch.
- llvm::APInt ConstantCondValue;
+ llvm::APSInt ConstantCondValue;
if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
SmallVector<const Stmt*, 4> CaseStmts;
if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
@@ -1192,7 +1192,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
return;
}
}
-
+
llvm::Value *CondV = EmitScalarExpr(S.getCond());
// Create basic block to hold stuff that comes after switch
@@ -1380,7 +1380,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
const LangOptions &LangOpts = CGF.CGM.getLangOpts();
-
+
// Add the location of the start of each subsequent line of the asm to the
// MDNode.
for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
@@ -1390,8 +1390,8 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
LineLoc.getRawEncoding()));
}
- }
-
+ }
+
return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}
@@ -1441,7 +1441,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<QualType> ResultRegQualTys;
std::vector<llvm::Type *> ResultRegTypes;
std::vector<llvm::Type *> ResultTruncRegTypes;
- std::vector<llvm::Type*> ArgTypes;
+ std::vector<llvm::Type *> ArgTypes;
std::vector<llvm::Value*> Args;
// Keep track of inout constraints.
@@ -1656,7 +1656,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// the expression, do the conversion.
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
llvm::Type *TruncTy = ResultTruncRegTypes[i];
-
+
// Truncate the integer result to the right size, note that TruncTy can be
// a pointer.
if (TruncTy->isFloatingPointTy())
@@ -1681,3 +1681,25 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
+
+void CodeGenFunction::EmitMSAsmStmt(const MSAsmStmt &S) {
+  // MS-style inline assembly is not fully supported, so Sema emits a warning.
+ if (!CGM.getCodeGenOpts().EmitMicrosoftInlineAsm)
+ return;
+
+  assert(S.isSimple() && "CodeGen can only handle simple MSAsmStmts.");
+
+ std::vector<llvm::Value*> Args;
+ std::vector<llvm::Type *> ArgTypes;
+
+ std::string MachineClobbers = Target.getClobbers();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, *S.getAsmString(), MachineClobbers, true);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+ Result->addAttribute(~0, llvm::Attribute::IANSDialect);
+}
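// A sketch of input that reaches EmitMSAsmStmt once Sema accepts it (requires
// Microsoft extensions plus the EmitMicrosoftInlineAsm codegen option; only
// "simple" statements are handled so far):
//
//   void f() {
//     __asm int 3   // emitted as a call to an InlineAsm value, marked
//   }               // NoUnwind and IANSDialect (Intel assembly dialect)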
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 17a0537..cdaa26a 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -355,13 +355,14 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
#ifndef NDEBUG
- const CGFunctionInfo &CallFnInfo =
- CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ const CGFunctionInfo &CallFnInfo =
+ CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
RequiredArgs::forPrototypePlus(FPT, 1));
assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
- assert(similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
+ assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
+ similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
FnInfo.getReturnInfo(), FnInfo.getReturnType()));
assert(CallFnInfo.arg_size() == FnInfo.arg_size());
for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
@@ -386,6 +387,9 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
if (!ResultType->isVoidType() && Slot.isNull())
CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
+ // Disable the final ARC autorelease.
+ AutoreleaseResult = false;
+
FinishFunction();
// Set the right linkage.
@@ -569,14 +573,13 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
- llvm::FunctionType *Ty =
- llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- PureVirtualFn =
- CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
- PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
- Int8PtrTy);
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
+ PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ CGM.Int8PtrTy);
}
-
Init = PureVirtualFn;
} else {
// Check if we should use a thunk.
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index ac704e7..a46f313 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -153,7 +153,7 @@ class LValue {
private:
void Initialize(QualType Type, Qualifiers Quals,
- CharUnits Alignment = CharUnits(),
+ CharUnits Alignment,
llvm::MDNode *TBAAInfo = 0) {
this->Type = Type;
this->Quals = Quals;
@@ -295,12 +295,12 @@ public:
/// access.
static LValue MakeBitfield(llvm::Value *BaseValue,
const CGBitFieldInfo &Info,
- QualType type) {
+ QualType type, CharUnits Alignment) {
LValue R;
R.LVType = BitField;
R.V = BaseValue;
R.BitFieldInfo = &Info;
- R.Initialize(type, type.getQualifiers());
+ R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
@@ -389,7 +389,8 @@ public:
return AV;
}
- static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
+ static AggValueSlot forLValue(const LValue &LV,
+ IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 7b1dbce..76be85f 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -8,8 +8,6 @@ set(LLVM_LINK_COMPONENTS
vectorize
)
-set(LLVM_USED_LIBS clangBasic clangAST clangFrontend)
-
add_clang_library(clangCodeGen
BackendUtil.cpp
CGBlocks.cpp
@@ -52,5 +50,19 @@ add_clang_library(clangCodeGen
TargetInfo.cpp
)
-add_dependencies(clangCodeGen ClangAttrClasses ClangAttrList ClangDeclNodes
- ClangStmtNodes)
+add_dependencies(clangCodeGen
+ ClangARMNeon
+ ClangAttrClasses
+ ClangAttrList
+ ClangCommentNodes
+ ClangDeclNodes
+ ClangDiagnosticCommon
+ ClangDiagnosticFrontend
+ ClangStmtNodes
+ )
+
+target_link_libraries(clangCodeGen
+ clangBasic
+ clangAST
+ clangFrontend
+ )
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 2939062..1d02861 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -23,12 +23,12 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Support/MDBuilder.h"
+#include "llvm/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
-CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().getTargetInfo()),
Builder(cgm.getModule().getContext()),
@@ -42,7 +42,8 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
TerminateHandler(0), TrapBB(0) {
CatchUndefined = getContext().getLangOpts().CatchUndefined;
- CGM.getCXXABI().getMangleContext().startNewFunction();
+ if (!suppressNewContext)
+ CGM.getCXXABI().getMangleContext().startNewFunction();
}
CodeGenFunction::~CodeGenFunction() {
@@ -251,6 +252,81 @@ void CodeGenFunction::EmitMCountInstrumentation() {
Builder.CreateCall(MCountFn);
}
+// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
+// information in the program executable. The stored information includes the
+// argument's name and its type, address and access qualifiers.
+// FIXME: Add type, address, and access qualifiers.
+static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
+                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
+                                 llvm::SmallVector<llvm::Value*, 5> &kernelMDArgs) {
+
+  // Create MDNodes that represent the kernel arg metadata.
+  // Each MDNode is a list of the form: "key", followed by one value per
+  // kernel argument.
+
+ // MDNode for the kernel argument names.
+ SmallVector<llvm::Value*, 8> argNames;
+ argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ const ParmVarDecl *parm = FD->getParamDecl(i);
+
+    // Get the argument name.
+    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
+  }
+ // Add MDNode to the list of all metadata.
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
+}
+
+void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn)
+{
+ if (!FD->hasAttr<OpenCLKernelAttr>())
+ return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+  llvm::SmallVector<llvm::Value*, 5> kernelMDArgs;
+ kernelMDArgs.push_back(Fn);
+
+ if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
+ GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs);
+
+ if (FD->hasAttr<WorkGroupSizeHintAttr>()) {
+    llvm::SmallVector<llvm::Value*, 5> attrMDArgs;
+ attrMDArgs.push_back(llvm::MDString::get(Context, "work_group_size_hint"));
+ WorkGroupSizeHintAttr *attr = FD->getAttr<WorkGroupSizeHintAttr>();
+ llvm::Type *iTy = llvm::IntegerType::get(Context, 32);
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getXDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getYDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getZDim())));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
+    llvm::SmallVector<llvm::Value*, 5> attrMDArgs;
+ attrMDArgs.push_back(llvm::MDString::get(Context, "reqd_work_group_size"));
+ ReqdWorkGroupSizeAttr *attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+ llvm::Type *iTy = llvm::IntegerType::get(Context, 32);
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getXDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getYDim())));
+ attrMDArgs.push_back(llvm::ConstantInt::get(iTy,
+ llvm::APInt(32, (uint64_t)attr->getZDim())));
+ kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
+ }
+
+ llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
+ llvm::NamedMDNode *OpenCLKernelMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+ OpenCLKernelMetadata->addOperand(kernelMDNode);
+}
+
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
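// A sketch of the combined metadata for an invented kernel, assuming the
// EmitOpenCLArgMetadata option is on (metadata syntax of this LLVM era):
//
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1)))
//   void f(int x) {}
//
// produces roughly:
//
//   !opencl.kernels = !{!0}
//   !0 = metadata !{void (i32)* @f,
//                   metadata !{metadata !"kernel_arg_name", metadata !"x"},
//                   metadata !{metadata !"reqd_work_group_size",
//                              i32 8, i32 4, i32 1}}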
@@ -279,14 +355,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (getContext().getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- if (FD->hasAttr<OpenCLKernelAttr>()) {
- llvm::LLVMContext &Context = getLLVMContext();
- llvm::NamedMDNode *OpenCLMetadata =
- CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
-
- llvm::Value *Op = Fn;
- OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
- }
+ EmitOpenCLKernelMetadata(FD, Fn);
}
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
@@ -537,7 +606,7 @@ bool CodeGenFunction::containsBreak(const Stmt *S) {
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
bool &ResultBool) {
- llvm::APInt ResultInt;
+ llvm::APSInt ResultInt;
if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
return false;
@@ -549,7 +618,7 @@ bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
-ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
+ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
llvm::APSInt Int;
@@ -687,10 +756,10 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
+/// \param baseType - the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
-/// \param align - the total alignment of the VLA
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *dest, llvm::Value *src,
llvm::Value *sizeInChars) {
@@ -881,33 +950,49 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
llvm::ConstantInt *zero = Builder.getInt32(0);
gepIndices.push_back(zero);
- // It's more efficient to calculate the count from the LLVM
- // constant-length arrays than to re-evaluate the array bounds.
uint64_t countFromCLAs = 1;
+ QualType eltType;
llvm::ArrayType *llvmArrayType =
- cast<llvm::ArrayType>(
+ dyn_cast<llvm::ArrayType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
- while (true) {
+ while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
== llvmArrayType->getNumElements());
gepIndices.push_back(zero);
countFromCLAs *= llvmArrayType->getNumElements();
+ eltType = arrayType->getElementType();
llvmArrayType =
dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
- if (!llvmArrayType) break;
-
arrayType = getContext().getAsArrayType(arrayType->getElementType());
- assert(arrayType && "LLVM and Clang types are out-of-synch");
+ assert((!llvmArrayType || arrayType) &&
+ "LLVM and Clang types are out-of-synch");
}
- baseType = arrayType->getElementType();
+ if (arrayType) {
+ // From this point onwards, the Clang array type has been emitted
+ // as some other type (probably a packed struct). Compute the array
+ // size, and just emit the 'begin' expression as a bitcast.
+ while (arrayType) {
+ countFromCLAs *=
+ cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
+ eltType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(eltType);
+ }
+
+ unsigned AddressSpace =
+ cast<llvm::PointerType>(addr->getType())->getAddressSpace();
+ llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
+ addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
+ } else {
+ // Create the actual GEP.
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+ }
- // Create the actual GEP.
- addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+ baseType = eltType;
llvm::Value *numElements
= llvm::ConstantInt::get(SizeTy, countFromCLAs);
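// A worked example for the rewritten loop above (types invented): for
//
//   int a[3][4];
//
// both levels are ConstantArrayTypes matched by LLVM array types, so
// countFromCLAs becomes 3 * 4 = 12, eltType is 'int', and 'addr' is GEP'd to
// the first element. The bitcast path only triggers when the LLVM type
// stopped being an array before the Clang type did (e.g. an array lowered as
// a packed struct).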
@@ -1071,7 +1156,8 @@ void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
llvm::Constant *Init) {
assert (Init && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
- Dbg->EmitGlobalVariable(E->getDecl(), Init);
+ if (CGM.getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
CodeGenFunction::PeepholeProtection
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 83f1e2d..ed3e43b 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -591,6 +591,11 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+ /// BoundsChecking - Emit run-time bounds checks. Higher values mean
+ /// potentially higher performance penalties.
+ unsigned char BoundsChecking;
+
+ /// CatchUndefined - Emit run-time checks to catch undefined behaviors.
bool CatchUndefined;
/// In ARC, whether we should autorelease the return value.
@@ -1192,8 +1197,18 @@ private:
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
+ /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
+ /// In the kernel metadata node, reference the kernel function and metadata
+ /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
+ /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
+ /// "work_group_size_hint", and three 32-bit integers X, Y and Z.
+ /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
+ /// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
+ void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn);
+
public:
- CodeGenFunction(CodeGenModule &cgm);
+ CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
~CodeGenFunction();
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
@@ -1305,6 +1320,7 @@ public:
const ObjCPropertyImplDecl *PID);
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
+                              const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn);
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
@@ -1560,6 +1576,7 @@ public:
return LValue::MakeAddr(V, T, Alignment, getContext(),
CGM.getTBAAInfo(T));
}
+
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
CharUnits Alignment;
if (!T->isIncompleteType())
@@ -1616,8 +1633,8 @@ public:
///
/// \param IgnoreResult - True if the resulting value isn't used.
RValue EmitAnyExpr(const Expr *E,
- AggValueSlot AggSlot = AggValueSlot::ignored(),
- bool IgnoreResult = false);
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
@@ -1643,7 +1660,7 @@ public:
/// volatile.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy, bool isVolatile=false,
- unsigned Alignment = 0);
+ CharUnits Alignment = CharUnits::Zero());
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
@@ -1964,6 +1981,7 @@ public:
void EmitCaseStmt(const CaseStmt &S);
void EmitCaseStmtRange(const CaseStmt &S);
void EmitAsmStmt(const AsmStmt &S);
+ void EmitMSAsmStmt(const MSAsmStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
@@ -2099,6 +2117,7 @@ public:
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitInitListLValue(const InitListExpr *E);
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
@@ -2143,9 +2162,6 @@ public:
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
- LValue EmitLValueForAnonRecordField(llvm::Value* Base,
- const IndirectFieldDecl* Field,
- unsigned CVRQualifiers);
LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
@@ -2158,9 +2174,6 @@ public:
llvm::Value* Base, const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers);
- LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
- unsigned CVRQualifiers);
-
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
LValue EmitLambdaLValue(const LambdaExpr *E);
@@ -2259,12 +2272,11 @@ public:
llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
- llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
- llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
+ llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
@@ -2359,7 +2371,7 @@ public:
/// EmitAggExpr - Emit the computation of the specified expression
/// of aggregate type. The result is computed into the given slot,
/// which may be null to indicate that the value is not needed.
- void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
+ void EmitAggExpr(const Expr *E, AggValueSlot AS);
/// EmitAggExprToLValue - Emit the computation of the specified expression of
/// aggregate type into a temporary LValue.
@@ -2411,10 +2423,9 @@ public:
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
bool PerformInit);
- /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
- /// with the C++ runtime so that its destructor will be called at exit.
- void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
- llvm::Constant *DeclPtr);
+ /// Call atexit() with a function that passes the given argument to
+ /// the given function.
+ void registerGlobalDtorWithAtExit(llvm::Constant *fn, llvm::Constant *addr);
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
@@ -2497,7 +2508,7 @@ public:
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
- bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 9a55c08..3ae3c52 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -102,14 +102,16 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
if (LangOpts.CUDA)
createCUDARuntime();
- // Enable TBAA unless it's suppressed.
- if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
- TBAA = new CodeGenTBAA(Context, VMContext, getLangOpts(),
+ // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
+ if (LangOpts.ThreadSanitizer ||
+ (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
+ TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
ABI.getMangleContext());
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
- if (CodeGenOpts.DebugInfo || CodeGenOpts.EmitGcovArcs ||
+ if (CodeGenOpts.DebugInfo != CodeGenOptions::NoDebugInfo ||
+ CodeGenOpts.EmitGcovArcs ||
CodeGenOpts.EmitGcovNotes)
DebugInfo = new CGDebugInfo(*this);
@@ -133,10 +135,22 @@ CodeGenModule::~CodeGenModule() {
}
void CodeGenModule::createObjCRuntime() {
- if (!LangOpts.NeXTRuntime)
+ // This is just isGNUFamily(), but we want to force implementors of
+ // new ABIs to decide how best to do this.
+ switch (LangOpts.ObjCRuntime.getKind()) {
+ case ObjCRuntime::GNUstep:
+ case ObjCRuntime::GCC:
+ case ObjCRuntime::ObjFW:
ObjCRuntime = CreateGNUObjCRuntime(*this);
- else
+ return;
+
+ case ObjCRuntime::FragileMacOSX:
+ case ObjCRuntime::MacOSX:
+ case ObjCRuntime::iOS:
ObjCRuntime = CreateMacObjCRuntime(*this);
+ return;
+ }
+ llvm_unreachable("bad runtime kind");
}
void CodeGenModule::createOpenCLRuntime() {
@@ -245,6 +259,45 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
GV->setVisibility(GetLLVMVisibility(LV.visibility()));
}
+static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
+ return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
+ .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
+ .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
+ .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
+ .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
+}
+
+static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
+ CodeGenOptions::TLSModel M) {
+ switch (M) {
+ case CodeGenOptions::GeneralDynamicTLSModel:
+ return llvm::GlobalVariable::GeneralDynamicTLSModel;
+ case CodeGenOptions::LocalDynamicTLSModel:
+ return llvm::GlobalVariable::LocalDynamicTLSModel;
+ case CodeGenOptions::InitialExecTLSModel:
+ return llvm::GlobalVariable::InitialExecTLSModel;
+ case CodeGenOptions::LocalExecTLSModel:
+ return llvm::GlobalVariable::LocalExecTLSModel;
+ }
+ llvm_unreachable("Invalid TLS model!");
+}
+
+void CodeGenModule::setTLSMode(llvm::GlobalVariable *GV,
+ const VarDecl &D) const {
+ assert(D.isThreadSpecified() && "setting TLS mode on non-TLS var!");
+
+ llvm::GlobalVariable::ThreadLocalMode TLM;
+ TLM = GetLLVMTLSModel(CodeGenOpts.DefaultTLSModel);
+
+ // Override the TLS model if it is explicitly specified.
+ if (D.hasAttr<TLSModelAttr>()) {
+ const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>();
+ TLM = GetLLVMTLSModel(Attr->getModel());
+ }
+
+ GV->setThreadLocalMode(TLM);
+}
+
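// A sketch of the observable effect of setTLSMode above (variable name
// invented; IR spelling of this LLVM era):
//
//   __thread int tls_var __attribute__((tls_model("initial-exec")));
//
// becomes roughly:
//
//   @tls_var = thread_local(initialexec) global i32 0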
/// Set the symbol visibility of type information (vtable and RTTI)
/// associated with the given type.
void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
@@ -334,7 +387,8 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
- getCXXABI().getMangleContext().mangleBlock(BD, Out);
+ getCXXABI().getMangleContext().mangleBlock(BD, Out,
+ dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()));
else
getCXXABI().getMangleContext().mangleName(ND, Out);
@@ -355,7 +409,8 @@ void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
const Decl *D = GD.getDecl();
llvm::raw_svector_ostream Out(Buffer.getBuffer());
if (D == 0)
- MangleCtx.mangleGlobalBlock(BD, Out);
+ MangleCtx.mangleGlobalBlock(BD,
+ dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
@@ -474,8 +529,7 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
unsigned CallingConv;
AttributeListType AttributeList;
ConstructAttributeList(Info, D, AttributeList, CallingConv);
- F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
- AttributeList.size()));
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList));
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -493,7 +547,7 @@ static bool hasUnwindExceptions(const LangOptions &LangOpts) {
// If ObjC exceptions are enabled, this depends on the ABI.
if (LangOpts.ObjCExceptions) {
- if (!LangOpts.ObjCNonFragileABI) return false;
+ return LangOpts.ObjCRuntime.hasUnwindExceptions();
}
return true;
@@ -517,10 +571,14 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
F->addFnAttr(llvm::Attribute::NoInline);
// (noinline wins over always_inline, and we can't specify both in IR)
- if (D->hasAttr<AlwaysInlineAttr>() &&
+ if ((D->hasAttr<AlwaysInlineAttr>() || D->hasAttr<ForceInlineAttr>()) &&
!F->hasFnAttr(llvm::Attribute::NoInline))
F->addFnAttr(llvm::Attribute::AlwaysInline);
+ // FIXME: Communicate hot and cold attributes to LLVM more directly.
+ if (D->hasAttr<ColdAttr>())
+ F->addFnAttr(llvm::Attribute::OptimizeForSize);
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
@@ -652,7 +710,7 @@ void CodeGenModule::EmitDeferred() {
if (!DeferredVTables.empty()) {
const CXXRecordDecl *RD = DeferredVTables.back();
DeferredVTables.pop_back();
- getVTables().GenerateClassData(getVTableLinkage(RD), RD);
+ getCXXABI().EmitVTables(RD);
continue;
}
@@ -930,7 +988,7 @@ CodeGenModule::shouldEmitFunction(const FunctionDecl *F) {
if (getFunctionLinkage(F) != llvm::Function::AvailableExternallyLinkage)
return true;
if (CodeGenOpts.OptimizationLevel == 0 &&
- !F->hasAttr<AlwaysInlineAttr>())
+ !F->hasAttr<AlwaysInlineAttr>() && !F->hasAttr<ForceInlineAttr>())
return false;
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
@@ -1054,6 +1112,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
} else if (getLangOpts().CPlusPlus && D.getDecl()) {
// Look for a declaration that's lexically in a record.
const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
+ FD = FD->getMostRecentDecl();
do {
if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
if (FD->isImplicit() && !ForVTable) {
@@ -1166,11 +1225,12 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
DeferredDecls.erase(DDI);
}
+ unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace());
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
llvm::GlobalValue::ExternalLinkage,
0, MangledName, 0,
- false, Ty->getAddressSpace());
+ llvm::GlobalVariable::NotThreadLocal, AddrSpace);
// Handle things which are present even on external declarations.
if (D) {
@@ -1193,10 +1253,14 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
GV->setVisibility(GetLLVMVisibility(LV.visibility()));
}
- GV->setThreadLocal(D->isThreadSpecified());
+ if (D->isThreadSpecified())
+ setTLSMode(GV, *D);
}
- return GV;
+ if (AddrSpace != Ty->getAddressSpace())
+ return llvm::ConstantExpr::getBitCast(GV, Ty);
+ else
+ return GV;
}
@@ -1286,7 +1350,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
if (DefinitionRequired)
- getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+ getCXXABI().EmitVTables(Class);
}
llvm::GlobalVariable::LinkageTypes
@@ -1481,6 +1545,20 @@ CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
return llvmInit;
}
+unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
+ unsigned AddrSpace) {
+ if (LangOpts.CUDA && CodeGenOpts.CUDAIsDevice) {
+ if (D->hasAttr<CUDAConstantAttr>())
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_constant);
+ else if (D->hasAttr<CUDASharedAttr>())
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_shared);
+ else
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ }
+
+ return AddrSpace;
+}
+
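To illustrate the rule above: in device-side CUDA compilation the address space
follows the declaration's attribute, not its type. A sketch (the concrete
addrspace numbers are target-specific):

    // All three share the C++ type 'int'; only the declaration attribute
    // differs, and that is what selects the LLVM address space.
    __constant__ int lut[16];  // LangAS::cuda_constant
    __shared__   int tile[64]; // LangAS::cuda_shared
    __device__   int counter;  // LangAS::cuda_device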
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
@@ -1511,8 +1589,10 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// FIXME: It does so in a global constructor, which is *not* what we
// want.
- if (!Init)
+ if (!Init) {
+ initializedGlobalDecl = GlobalDecl(D);
Init = EmitConstantInit(*InitDecl);
+ }
if (!Init) {
QualType T = InitExpr->getType();
if (D->getType()->isReferenceType())
@@ -1560,7 +1640,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (GV == 0 ||
GV->getType()->getElementType() != InitType ||
GV->getType()->getAddressSpace() !=
- getContext().getTargetAddressSpace(ASTTy)) {
+ GetGlobalVarAddressSpace(D, getContext().getTargetAddressSpace(ASTTy))) {
// Move the old entry aside so that we'll create a new one.
Entry->setName(StringRef());
@@ -1604,7 +1684,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
- DI->EmitGlobalVariable(GV, D);
+ if (getCodeGenOpts().DebugInfo >= CodeGenOptions::LimitedDebugInfo)
+ DI->EmitGlobalVariable(GV, D);
}
llvm::GlobalValue::LinkageTypes
@@ -1710,8 +1791,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
- NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec.begin(),
- AttrVec.end()));
+ NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec));
NewCall->setCallingConv(CI->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
@@ -2059,7 +2139,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
std::string StringClass(getLangOpts().ObjCConstantStringClass);
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
- if (LangOpts.ObjCNonFragileABI) {
+ if (LangOpts.ObjCRuntime.isNonFragile()) {
std::string str =
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
@@ -2104,7 +2184,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
D->addDecl(Field);
}
@@ -2147,7 +2227,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
"_unnamed_nsstring_");
// FIXME. Fix section.
if (const char *Sect =
- LangOpts.ObjCNonFragileABI
+ LangOpts.ObjCRuntime.isNonFragile()
? getContext().getTargetInfo().getNSStringNonFragileABISection()
: getContext().getTargetInfo().getNSStringSection())
GV->setSection(Sect);
@@ -2179,7 +2259,7 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
FieldTypes[i], /*TInfo=*/0,
/*BitWidth=*/0,
/*Mutable=*/false,
- /*HasInit=*/false);
+ ICIS_NoInit);
Field->setAccess(AS_public);
D->addDecl(Field);
}
@@ -2506,14 +2586,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Forward declarations, no (immediate) code generation.
case Decl::ObjCInterface:
+ case Decl::ObjCCategory:
break;
-
- case Decl::ObjCCategory: {
- ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
- if (CD->IsClassExtension() && CD->hasSynthBitfield())
- Context.ResetObjCLayout(CD->getClassInterface());
- break;
- }
case Decl::ObjCProtocol: {
ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(D);
@@ -2530,8 +2604,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCImplementation: {
ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
- if (LangOpts.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
- Context.ResetObjCLayout(OMD->getClassInterface());
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
ObjCRuntime->GenerateClass(OMD);
@@ -2564,7 +2636,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
const std::string &S = getModule().getModuleInlineAsm();
if (S.empty())
getModule().setModuleInlineAsm(AsmString);
- else if (*--S.end() == '\n')
+ else if (S.end()[-1] == '\n')
getModule().setModuleInlineAsm(S + AsmString.str());
else
getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 38f5008..d6ff50d 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -138,6 +138,7 @@ namespace CodeGen {
union {
unsigned char PointerAlignInBytes;
unsigned char PointerSizeInBytes;
+ unsigned char SizeSizeInBytes; // sizeof(size_t)
};
};
@@ -350,6 +351,8 @@ class CodeGenModule : public CodeGenTypeCache {
struct {
int GlobalUniqueCount;
} Block;
+
+ GlobalDecl initializedGlobalDecl;
/// @}
public:
@@ -471,6 +474,10 @@ public:
/// GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+ /// setTLSMode - Set the TLS mode for the given LLVM GlobalVariable
+ /// for the thread-local variable declaration D.
+ void setTLSMode(llvm::GlobalVariable *GV, const VarDecl &D) const;
+
/// TypeVisibilityKind - The kind of global variable that is passed to
/// setTypeVisibility
enum TypeVisibilityKind {
@@ -516,6 +523,12 @@ public:
CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage);
+ /// GetGlobalVarAddressSpace - Return the address space of the underlying
+ /// global variable for D, as determined by its declaration. Normally this
+ /// is the same as the address space of D's type, but in CUDA, address spaces
+ /// are associated with declarations, not types.
+ unsigned GetGlobalVarAddressSpace(const VarDecl *D, unsigned AddrSpace);
+
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
@@ -580,7 +593,7 @@ public:
/// getUniqueBlockCount - Fetches the global unique block count.
int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
-
+
/// getBlockDescriptorType - Fetches the type of a generic block
/// descriptor.
llvm::Type *getBlockDescriptorType();
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index a3cadcf..bab60af 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -18,6 +18,7 @@
#include "CodeGenTBAA.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Mangle.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Constants.h"
@@ -26,8 +27,9 @@ using namespace clang;
using namespace CodeGen;
CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const CodeGenOptions &CGO,
const LangOptions &Features, MangleContext &MContext)
- : Context(Ctx), VMContext(VMContext), Features(Features), MContext(MContext),
+ : Context(Ctx), CodeGenOpts(CGO), Features(Features), MContext(MContext),
MDHelper(VMContext), Root(0), Char(0) {
}
@@ -74,6 +76,10 @@ static bool TypeHasMayAlias(QualType QTy) {
llvm::MDNode *
CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // At -O0 TBAA is not emitted for regular types.
+ if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing)
+ return NULL;
+
// If the type has the may_alias attribute (even on a typedef), it is
// effectively in the general char alias class.
if (TypeHasMayAlias(QTy))
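The may_alias escape hatch checked above looks like this from user code (a
sketch; 'aliasing_int' and 'read_bits' are illustrative names):

    // Loads through 'aliasing_int' land in the widest ("char") TBAA class,
    // so the optimizer must assume they can alias the float object.
    typedef int __attribute__((may_alias)) aliasing_int;

    static float f = 1.0f;
    int read_bits(void) { return *(aliasing_int *)&f; }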
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index 4a97852..c17a5cf 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -16,8 +16,8 @@
#define CLANG_CODEGEN_CODEGENTBAA_H
#include "clang/Basic/LLVM.h"
+#include "llvm/MDBuilder.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/MDBuilder.h"
namespace llvm {
class LLVMContext;
@@ -26,6 +26,7 @@ namespace llvm {
namespace clang {
class ASTContext;
+ class CodeGenOptions;
class LangOptions;
class MangleContext;
class QualType;
@@ -38,7 +39,7 @@ namespace CodeGen {
/// while lowering AST types to LLVM types.
class CodeGenTBAA {
ASTContext &Context;
- llvm::LLVMContext& VMContext;
+ const CodeGenOptions &CodeGenOpts;
const LangOptions &Features;
MangleContext &MContext;
@@ -61,6 +62,7 @@ class CodeGenTBAA {
public:
CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const CodeGenOptions &CGO,
const LangOptions &Features,
MangleContext &MContext);
~CodeGenTBAA();
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 41fd536..9a78dae 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -474,11 +474,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// build it.
const CGFunctionInfo *FI;
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
- FI = &arrangeFunctionType(
+ FI = &arrangeFreeFunctionType(
CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
} else {
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
- FI = &arrangeFunctionType(
+ FI = &arrangeFreeFunctionType(
CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index ba2b3ae..3c29d2d 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -189,26 +189,32 @@ public:
const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type);
- const CGFunctionInfo &arrangeFunctionCall(const CallArgList &Args,
- const FunctionType *Ty);
- const CGFunctionInfo &arrangeFunctionCall(QualType ResTy,
- const CallArgList &args,
- const FunctionType::ExtInfo &info,
- RequiredArgs required);
-
- const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionProtoType> Ty);
- const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeFreeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty);
+ const CGFunctionInfo &arrangeFreeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ FunctionType::ExtInfo info,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeCXXMethodCall(const CallArgList &args,
+ const FunctionProtoType *type,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
const FunctionProtoType *FTP);
- /// Retrieves the ABI information for the given function signature.
- /// This is the "core" routine to which all the others defer.
+ /// "Arrange" the LLVM information for a call or type with the given
+ /// signature. This is largely an internal method; other clients
+ /// should use one of the above routines, which ultimately defer to
+ /// this.
///
/// \param argTypes - must all actually be canonical as params
- const CGFunctionInfo &arrangeFunctionType(CanQualType returnType,
- ArrayRef<CanQualType> argTypes,
- const FunctionType::ExtInfo &info,
- RequiredArgs args);
+ const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
+ ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ RequiredArgs args);
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 98f67f3..0b7ce36 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -20,6 +20,7 @@
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
+#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include <clang/AST/Mangle.h>
@@ -48,10 +49,6 @@ protected:
return PtrDiffTy;
}
- bool NeedsArrayCookie(const CXXNewExpr *expr);
- bool NeedsArrayCookie(const CXXDeleteExpr *expr,
- QualType elementType);
-
public:
ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
@@ -111,19 +108,24 @@ public:
void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
- CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ StringRef GetPureVirtualCallName() { return "__cxa_pure_virtual"; }
+
+ CharUnits getArrayCookieSizeImpl(QualType elementType);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType);
- void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize);
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr, bool PerformInit);
+ void registerGlobalDtor(CodeGenFunction &CGF, llvm::Constant *dtor,
+ llvm::Constant *addr);
+
+ void EmitVTables(const CXXRecordDecl *Class);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -148,16 +150,14 @@ public:
void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
- CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ CharUnits getArrayCookieSizeImpl(QualType elementType);
llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType);
- void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr,
+ CharUnits cookieSize);
private:
/// \brief Returns true if the given instance method is one of the
@@ -796,54 +796,11 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
/************************** Array allocation cookies **************************/
-bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
- // If the class's usual deallocation function takes two arguments,
- // it needs a cookie.
- if (expr->doesUsualArrayDeleteWantSize())
- return true;
-
- // Automatic Reference Counting:
- // We need an array cookie for pointers with strong or weak lifetime.
- QualType AllocatedType = expr->getAllocatedType();
- if (getContext().getLangOpts().ObjCAutoRefCount &&
- AllocatedType->isObjCLifetimeType()) {
- switch (AllocatedType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- return true;
- }
- }
-
- // Otherwise, if the class has a non-trivial destructor, it always
- // needs a cookie.
- const CXXRecordDecl *record =
- AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
- return (record && !record->hasTrivialDestructor());
-}
-
-bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
- QualType elementType) {
- // If the class's usual deallocation function takes two arguments,
- // it needs a cookie.
- if (expr->doesUsualArrayDeleteWantSize())
- return true;
-
- return elementType.isDestructedType();
-}
-
-CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
- if (!NeedsArrayCookie(expr))
- return CharUnits::Zero();
-
- // Padding is the maximum of sizeof(size_t) and alignof(elementType)
- ASTContext &Ctx = getContext();
- return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
- Ctx.getTypeAlignInChars(expr->getAllocatedType()));
+CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
+ // The array cookie is a size_t; pad that up to the element alignment.
+ // The cookie is actually right-justified in that space.
+ return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
+ CGM.getContext().getTypeAlignInChars(elementType));
}
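A worked example of that rule as standalone C++, assuming a typical 64-bit
target where sizeof(size_t) == 8 ('Overaligned' is an illustrative type):

    #include <cstddef>

    struct alignas(16) Overaligned { char payload[16]; };

    // cookie = max(sizeof(size_t), alignof(T)); the element count occupies
    // the last sizeof(size_t) bytes of it ("right-justified").
    constexpr size_t cookieSize(size_t elementAlign) {
      return elementAlign > sizeof(size_t) ? elementAlign : sizeof(size_t);
    }

    static_assert(cookieSize(alignof(double)) == sizeof(size_t),
                  "no over-alignment: a bare size_t cookie");
    static_assert(cookieSize(alignof(Overaligned)) == 16,
                  "count lives at byte offset 16 - sizeof(size_t) == 8");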
llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
@@ -851,7 +808,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(expr));
+ assert(requiresArrayCookie(expr));
unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
@@ -862,6 +819,7 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// The size of the cookie.
CharUnits CookieSize =
std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+ assert(CookieSize == getArrayCookieSizeImpl(ElementType));
// Compute an offset to the cookie.
llvm::Value *CookiePtr = NewPtr;
@@ -882,53 +840,25 @@ llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookieSize.getQuantity());
}
-void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType,
- llvm::Value *&NumElements,
- llvm::Value *&AllocPtr,
- CharUnits &CookieSize) {
- // Derive a char* in the same address space as the pointer.
- unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
-
- // If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(expr, ElementType)) {
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- NumElements = 0;
- CookieSize = CharUnits::Zero();
- return;
- }
-
- QualType SizeTy = getContext().getSizeType();
- CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
-
- CookieSize
- = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
-
- CharUnits NumElementsOffset = CookieSize - SizeSize;
-
- // Compute the allocated pointer.
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
- -CookieSize.getQuantity());
-
- llvm::Value *NumElementsPtr = AllocPtr;
- if (!NumElementsOffset.isZero())
- NumElementsPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
- NumElementsOffset.getQuantity());
- NumElementsPtr =
- CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
- NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize) {
+ // The element size is right-justified in the cookie.
+ llvm::Value *numElementsPtr = allocPtr;
+ CharUnits numElementsOffset =
+ cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
+ if (!numElementsOffset.isZero())
+ numElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
+ numElementsOffset.getQuantity());
+
+ unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ numElementsPtr =
+ CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ return CGF.Builder.CreateLoad(numElementsPtr);
}
-CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
- if (!NeedsArrayCookie(expr))
- return CharUnits::Zero();
-
+CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// On ARM, the cookie is always:
// struct array_cookie {
// std::size_t element_size; // element_size != 0
@@ -936,7 +866,7 @@ CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
// };
// TODO: what should we do if the allocated type actually wants
// greater alignment?
- return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
+ return CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes);
}
llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
@@ -944,7 +874,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
- assert(NeedsArrayCookie(expr));
+ assert(requiresArrayCookie(expr));
// NewPtr is a char*.
@@ -975,44 +905,18 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookieSize.getQuantity());
}
-void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType,
- llvm::Value *&NumElements,
- llvm::Value *&AllocPtr,
- CharUnits &CookieSize) {
- // Derive a char* in the same address space as the pointer.
- unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
-
- // If we don't need an array cookie, bail out early.
- if (!NeedsArrayCookie(expr, ElementType)) {
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- NumElements = 0;
- CookieSize = CharUnits::Zero();
- return;
- }
-
- QualType SizeTy = getContext().getSizeType();
- CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
-
- // The cookie size is always 2 * sizeof(size_t).
- CookieSize = 2 * SizeSize;
-
- // The allocated pointer is the input ptr, minus that amount.
- AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
- AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
- -CookieSize.getQuantity());
-
- // The number of elements is at offset sizeof(size_t) relative to that.
- llvm::Value *NumElementsPtr
- = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
- SizeSize.getQuantity());
- NumElementsPtr =
- CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
- NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize) {
+ // The number of elements is at offset sizeof(size_t) relative to
+ // the allocated pointer.
+ llvm::Value *numElementsPtr
+ = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);
+
+ unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ numElementsPtr =
+ CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
+ return CGF.Builder.CreateLoad(numElementsPtr);
}
/*********************** Static local initialization **************************/
@@ -1200,3 +1104,60 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGF.EmitBlock(EndBlock);
}
+
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.Builder.CreateCall(atexit, args)->setDoesNotThrow();
+}
+
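What the helper arranges, expressed as ordinary C++; this is a sketch of the
runtime contract rather than the emitted IR ('Logger' and 'loggerDtor' are
illustrative names):

    extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
    // Declared as a single byte, matching CreateRuntimeVariable(Int8Ty, ...)
    // above; only its address is ever used.
    extern "C" char __dso_handle;

    struct Logger { ~Logger() {} };
    static Logger logger;

    static void loggerDtor(void *p) { static_cast<Logger *>(p)->~Logger(); }

    // Registered destructors run at exit(), or at dlclose() of this DSO;
    // that is why &__dso_handle is passed rather than a null handle.
    static int registered = __cxa_atexit(&loggerDtor, &logger, &__dso_handle);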
+/// Register a global destructor as best as we know how.
+void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit) {
+ return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr);
+ }
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getContext().getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ return CGM.AddCXXDtorEntry(dtor, addr);
+ }
+
+ CGF.registerGlobalDtorWithAtExit(dtor, addr);
+}
+
+/// Generate and emit virtual tables for the given class.
+void ItaniumCXXABI::EmitVTables(const CXXRecordDecl *Class) {
+ CGM.getVTables().GenerateClassData(CGM.getVTableLinkage(Class), Class);
+}
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 825e041..6a2925b 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -28,6 +28,8 @@ class MicrosoftCXXABI : public CGCXXABI {
public:
MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
+ StringRef GetPureVirtualCallName() { return "_purecall"; }
+
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
@@ -56,6 +58,13 @@ public:
// TODO: 'for base' flag
}
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
+
+ void EmitVTables(const CXXRecordDecl *Class);
+
+
// ==== Notes on array cookies =========
//
// MSVC seems to only use cookies when the class has a destructor; a
@@ -78,17 +87,92 @@ public:
// delete[] p;
// }
// Whereas it prints "104" and "104" if you give A a destructor.
- void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
- const CXXDeleteExpr *expr,
- QualType ElementType, llvm::Value *&NumElements,
- llvm::Value *&AllocPtr, CharUnits &CookieSize) {
- CGF.CGM.ErrorUnsupported(expr, "don't know how to handle array cookies "
- "in the Microsoft C++ ABI");
- }
+
+ bool requiresArrayCookie(const CXXDeleteExpr *expr, QualType elementType);
+ bool requiresArrayCookie(const CXXNewExpr *expr);
+ CharUnits getArrayCookieSizeImpl(QualType type);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+ llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize);
};
}
+bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // Microsoft seems to completely ignore the possibility of a
+ // two-argument usual deallocation function.
+ return elementType.isDestructedType();
+}
+
+bool MicrosoftCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
+ // Microsoft seems to completely ignore the possibility of a
+ // two-argument usual deallocation function.
+ return expr->getAllocatedType().isDestructedType();
+}
+
+CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
+ // The array cookie is always a size_t; we then pad that out to the
+ // alignment of the element type.
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(type));
+}
+
+llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
+ llvm::Value *allocPtr,
+ CharUnits cookieSize) {
+ unsigned AS = cast<llvm::PointerType>(allocPtr->getType())->getAddressSpace();
+ llvm::Value *numElementsPtr =
+ CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS));
+ return CGF.Builder.CreateLoad(numElementsPtr);
+}
+
+llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *newPtr,
+ llvm::Value *numElements,
+ const CXXNewExpr *expr,
+ QualType elementType) {
+ assert(requiresArrayCookie(expr));
+
+ // The size of the cookie.
+ CharUnits cookieSize = getArrayCookieSizeImpl(elementType);
+
+ // Compute an offset to the cookie.
+ llvm::Value *cookiePtr = newPtr;
+
+ // Write the number of elements into the appropriate slot.
+ unsigned AS = cast<llvm::PointerType>(newPtr->getType())->getAddressSpace();
+ llvm::Value *numElementsPtr
+ = CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS));
+ CGF.Builder.CreateStore(numElements, numElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
+ cookieSize.getQuantity());
+}
+
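The resulting layout differs from Itanium in where the count lives: it is
written at the very start of the allocation, and readArrayCookieImpl loads it
straight from allocPtr. In standalone form (an assumption drawn from the code
above, not from MSVC documentation):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    // Mirrors getArrayCookieSizeImpl: cookie = max(sizeof(size_t), alignof(T)),
    // with the element count left-justified (at offset 0) inside it.
    static size_t msCookieSize(size_t elementAlign) {
      return std::max(sizeof(size_t), elementAlign);
    }

    int main() {
      assert(msCookieSize(1) == sizeof(size_t)); // small alignment: bare size_t
      assert(msCookieSize(64) == 64);            // over-aligned: cookie widens
      return 0;
    }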
+void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
+ // FIXME: this code was only tested for global initialization.
+ // Not sure whether we want thread-safe static local variables as VS
+ // doesn't make them thread-safe.
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, DeclPtr, PerformInit);
+}
+
+void MicrosoftCXXABI::EmitVTables(const CXXRecordDecl *Class) {
+ // FIXME: implement
+}
+
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
return new MicrosoftCXXABI(CGM);
}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 2b71fdd..9c23ed9 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -413,12 +413,18 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
+ enum Class {
+ Integer,
+ Float
+ };
+
static const unsigned MinABIStackAlignInBytes = 4;
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
bool IsMMXDisabled;
bool IsWin32FloatStructABI;
+ unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
@@ -434,33 +440,31 @@ class X86_32ABIInfo : public ABIInfo {
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
-public:
-
- ABIArgInfo classifyReturnType(QualType RetTy,
+ Class classify(QualType Ty) const;
+ ABIArgInfo classifyReturnType(QualType RetTy,
unsigned callingConvention) const;
+ ABIArgInfo classifyArgumentTypeWithReg(QualType Ty,
+ unsigned &FreeRegs) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- FI.getCallingConvention());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
- }
+public:
+ virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
+ unsigned r)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
+ IsMMXDisabled(m), IsWin32FloatStructABI(w),
+ DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool d, bool p, bool m, bool w)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
+ bool d, bool p, bool m, bool w, unsigned r)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -626,6 +630,10 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
+ return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
+}
+
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
@@ -643,7 +651,7 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
i != e; ++i) {
QualType FT = i->getType();
- if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
+ if (isSSEVectorType(Context, FT))
return true;
if (isRecordWithSSEVectorType(Context, FT))
@@ -667,7 +675,8 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
+ if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
+ isRecordWithSSEVectorType(getContext(), Ty)))
return 16;
return MinABIStackAlignInBytes;
@@ -692,6 +701,57 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
return ABIArgInfo::getIndirect(StackAlign);
}
+X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
+ const Type *T = isSingleElementStruct(Ty, getContext());
+ if (!T)
+ T = Ty.getTypePtr();
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ BuiltinType::Kind K = BT->getKind();
+ if (K == BuiltinType::Float || K == BuiltinType::Double)
+ return Float;
+ }
+ return Integer;
+}
+
+ABIArgInfo
+X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
+ unsigned &FreeRegs) const {
+ // Common case first.
+ if (FreeRegs == 0)
+ return classifyArgumentType(Ty);
+
+ Class C = classify(Ty);
+ if (C == Float)
+ return classifyArgumentType(Ty);
+
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (SizeInRegs == 0)
+ return classifyArgumentType(Ty);
+
+ if (SizeInRegs > FreeRegs) {
+ FreeRegs = 0;
+ return classifyArgumentType(Ty);
+ }
+ assert(SizeInRegs >= 1 && SizeInRegs <= 3);
+ FreeRegs -= SizeInRegs;
+
+ // If it is a simple scalar, keep the type so that we produce a cleaner IR.
+ ABIArgInfo Info = classifyArgumentType(Ty);
+ if (Info.isDirect() && !Info.getDirectOffset() && !Info.getPaddingType())
+ return ABIArgInfo::getDirectInReg(Info.getCoerceToType());
+ if (Info.isExtend())
+ return ABIArgInfo::getExtendInReg(Info.getCoerceToType());
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type*, 3> Elements;
+ for (unsigned I = 0; I < SizeInRegs; ++I)
+ Elements.push_back(Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ return ABIArgInfo::getDirectInReg(Result);
+}
+
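The user-visible effect of the register-parameter logic above, sketched for an
x86-32 target with regparm(3); the IR shapes in the comments are assumptions
consistent with the coercion code, not compiler output:

    struct S { int x, y; };            /* 8 bytes, so SizeInRegs == 2 */

    __attribute__((regparm(3)))
    int f(int a, struct S s, int b);
    /* a -> i32 inreg          (FreeRegs: 3 -> 2)
       s -> {i32, i32} inreg   (FreeRegs: 2 -> 0)
       b -> i32 on the stack   (FreeRegs exhausted) */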
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
// FIXME: Set alignment on indirect arguments.
if (isAggregateTypeForABI(Ty)) {
@@ -753,6 +813,28 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
+
+ unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
+ DefaultNumRegisterParameters;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect() && FreeRegs) {
+ --FreeRegs;
+ ABIArgInfo &Old = FI.getReturnInfo();
+ Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
+ Old.getIndirectByVal(),
+ Old.getIndirectRealign());
+ }
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
+}
+
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BPP = CGF.Int8PtrPtrTy;
@@ -1345,7 +1427,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// single eightbyte, each is classified separately. Each eightbyte gets
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
- uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
+ uint64_t Offset =
+ OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
@@ -1584,7 +1667,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
// If the base is after the span we care about, ignore it.
- unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
+ unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
if (BaseOffset >= EndBit) continue;
unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
@@ -2411,6 +2494,64 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+// PowerPC-64
+
+namespace {
+class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
@@ -2559,7 +2700,8 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
// double, or 64-bit or 128-bit vectors.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() != BuiltinType::Float &&
- BT->getKind() != BuiltinType::Double)
+ BT->getKind() != BuiltinType::Double &&
+ BT->getKind() != BuiltinType::LongDouble)
return false;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
unsigned VecSize = Context.getTypeSize(VT);
@@ -2615,19 +2757,23 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
}
}
+ // Support byval for ARM.
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
+ getContext().getTypeAlign(Ty) > 64) {
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ }
+
// Otherwise, pass by coercing to a structure of the appropriate size.
- //
- // FIXME: This is kind of nasty... but there isn't much choice because the ARM
- // backend doesn't support byval.
- // FIXME: This doesn't handle alignment > 64 bits.
llvm::Type* ElemTy;
unsigned SizeRegs;
- if (getContext().getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::getInt64Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- } else {
+ // FIXME: Try to match the types of the arguments more accurately where
+ // we can.
+ if (getContext().getTypeAlign(Ty) <= 32) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
llvm::Type *STy =
@@ -2833,14 +2979,14 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
//===----------------------------------------------------------------------===//
-// PTX ABI Implementation
+// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
-class PTXABIInfo : public ABIInfo {
+class NVPTXABIInfo : public ABIInfo {
public:
- PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -2850,16 +2996,16 @@ public:
CodeGenFunction &CFG) const;
};
-class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
+class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+ NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const;
};
-ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
@@ -2867,14 +3013,14 @@ ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
}
-ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0);
return ABIArgInfo::getDirect();
}
-void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
@@ -2885,6 +3031,8 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
return;
// Pick the calling convention that the ABI selects by default.
+ // We're still using the PTX_Kernel/PTX_Device calling conventions here,
+ // but we should switch to NVVM metadata later on.
llvm::CallingConv::ID DefaultCC;
const LangOptions &LangOpts = getContext().getLangOpts();
if (LangOpts.OpenCL || LangOpts.CUDA) {
@@ -2903,14 +3051,14 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
-llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const {
- llvm_unreachable("PTX does not support varargs");
+llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const {
+ llvm_unreachable("NVPTX does not support varargs");
}
-void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
- llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const{
+void NVPTXTargetCodeGenInfo::
+SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const{
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return;
@@ -3097,13 +3245,16 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MipsABIInfo : public ABIInfo {
bool IsO32;
- unsigned MinABIStackAlignInBytes;
- llvm::Type* HandleAggregates(QualType Ty) const;
+ unsigned MinABIStackAlignInBytes, StackAlignInBytes;
+ void CoerceToIntArgs(uint64_t TySize,
+ SmallVector<llvm::Type*, 8> &ArgList) const;
+ llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
- ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+ StackAlignInBytes(IsO32 ? 8 : 16) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
@@ -3132,36 +3283,56 @@ public:
};
}
+void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
+ SmallVector<llvm::Type*, 8> &ArgList) const {
+ llvm::IntegerType *IntTy =
+ llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+
+ // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy; TySize is in bits.
+ for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
+ ArgList.push_back(IntTy);
+
+ // If necessary, add one more integer type to ArgList.
+ unsigned R = TySize % (MinABIStackAlignInBytes * 8);
+
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+}
+
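A standalone model of the chunking above; under O32, where
MinABIStackAlignInBytes == 4, a 72-bit aggregate coerces to {i32, i32, i8}:

    #include <cstdint>
    #include <vector>

    static std::vector<unsigned> coerceToIntWidths(uint64_t TySizeInBits,
                                                   unsigned MinAlignBytes) {
      std::vector<unsigned> Widths;
      unsigned Chunk = MinAlignBytes * 8;
      for (uint64_t N = TySizeInBits / Chunk; N; --N)
        Widths.push_back(Chunk);             // full-width integer chunks
      if (unsigned R = TySizeInBits % Chunk)
        Widths.push_back(R);                 // one trailing odd-sized chunk
      return Widths;
    }
    // coerceToIntWidths(72, 4) yields {32, 32, 8}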
// In N32/64, an aligned double precision floating point field is passed in
// a register.
-llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
- if (IsO32)
- return 0;
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+ SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+
+ if (IsO32) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
if (Ty->isComplexType())
return CGT.ConvertType(Ty);
const RecordType *RT = Ty->getAs<RecordType>();
- // Unions are passed in integer registers.
- if (!RT || !RT->isStructureOrClassType())
- return 0;
+ // Unions/vectors are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType()) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- uint64_t StructSize = getContext().getTypeSize(Ty);
- assert(!(StructSize % 8) && "Size of structure must be multiple of 8.");
+ assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
uint64_t LastOffset = 0;
unsigned idx = 0;
llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
- SmallVector<llvm::Type*, 8> ArgList;
// Iterate over fields in the struct/class and check if there are any aligned
// double fields.
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
- const QualType Ty = (*i)->getType();
+ const QualType Ty = i->getType();
const BuiltinType *BT = Ty->getAs<BuiltinType>();
if (!BT || BT->getKind() != BuiltinType::Double)
@@ -3180,43 +3351,33 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
LastOffset = Offset + 64;
}
- // This struct/class doesn't have an aligned double field.
- if (!LastOffset)
- return 0;
-
- // Add ((StructSize - LastOffset) / 64) args of type i64.
- for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
- ArgList.push_back(I64);
-
- // If the size of the remainder is not zero, add one more integer type to
- // ArgList.
- unsigned R = (StructSize - LastOffset) % 64;
- if (R)
- ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+ CoerceToIntArgs(TySize - LastOffset, IntArgList);
+ ArgList.append(IntArgList.begin(), IntArgList.end());
return llvm::StructType::get(getVMContext(), ArgList);
}
llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
- // Padding is inserted only for N32/64.
- if (IsO32)
- return 0;
+ assert((Offset % MinABIStackAlignInBytes) == 0);
+
+ if ((Align - 1) & Offset)
+ return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
- assert(Align <= 16 && "Alignment larger than 16 not handled.");
- return (Align == 16 && Offset & 0xf) ?
- llvm::IntegerType::get(getVMContext(), 64) : 0;
+ return 0;
}
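The bit test above works because ABI alignments are powers of two; a quick
standalone check of the predicate:

    #include <cassert>
    #include <cstdint>

    // Padding is needed exactly when Offset is not a multiple of Align.
    static bool needsPadding(uint64_t Align, uint64_t Offset) {
      return ((Align - 1) & Offset) != 0;
    }

    int main() {
      assert(needsPadding(16, 8));   // 16-byte-aligned type at offset 8: pad
      assert(!needsPadding(16, 32)); // already aligned: no padding
      return 0;
    }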
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
uint64_t OrigOffset = Offset;
- uint64_t TySize =
- llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t TySize = getContext().getTypeSize(Ty);
uint64_t Align = getContext().getTypeAlign(Ty) / 8;
- Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
- Offset += TySize;
- if (isAggregateTypeForABI(Ty)) {
+ Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
+ (uint64_t)StackAlignInBytes);
+ Offset = llvm::RoundUpToAlignment(Offset, Align);
+ Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
+
+ if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
// Ignore empty aggregates.
if (TySize == 0)
return ABIArgInfo::getIgnore();
@@ -3224,20 +3385,15 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
// Records with non trivial destructors/constructors should not be passed
// by value.
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
- Offset = OrigOffset + 8;
+ Offset = OrigOffset + MinABIStackAlignInBytes;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- // If we have reached here, aggregates are passed either indirectly via a
- // byval pointer or directly by coercing to another structure type. In the
- // latter case, padding is inserted if the offset of the aggregate is
- // unaligned.
- llvm::Type *ResType = HandleAggregates(Ty);
-
- if (!ResType)
- return ABIArgInfo::getIndirect(0);
-
- return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
+ // If we have reached here, aggregates are passed directly by coercing to
+ // another structure type. Padding is inserted if the offset of the
+ // aggregate is unaligned.
+ return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
+ getPaddingType(Align, OrigOffset));
}
// Treat an enum type as its underlying type.
@@ -3253,7 +3409,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
const RecordType *RT = RetTy->getAs<RecordType>();
- SmallVector<llvm::Type*, 2> RTList;
+ SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
const RecordDecl *RD = RT->getDecl();
@@ -3272,12 +3428,12 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
for (; b != e; ++b) {
- const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+ const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
if (!BT || !BT->isFloatingPoint())
break;
- RTList.push_back(CGT.ConvertType((*b)->getType()));
+ RTList.push_back(CGT.ConvertType(b->getType()));
}
if (b == e)
@@ -3288,11 +3444,7 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
}
}
- RTList.push_back(llvm::IntegerType::get(getVMContext(),
- std::min(Size, (uint64_t)64)));
- if (Size > 64)
- RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
-
+ CoerceToIntArgs(Size, RTList);
return llvm::StructType::get(getVMContext(), RTList);
}
@@ -3302,11 +3454,15 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType() || Size == 0)
return ABIArgInfo::getIgnore();
- if (isAggregateTypeForABI(RetTy)) {
+ if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
if (Size <= 128) {
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
+ // O32 returns integer vectors in registers.
+ if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+
if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
}
@@ -3327,7 +3483,7 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
RetInfo = classifyReturnType(FI.getReturnType());
// Check if a pointer to an aggregate is passed as a hidden argument.
- uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
@@ -3634,10 +3790,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+ case llvm::Triple::ppc64:
+ return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
- case llvm::Triple::ptx32:
- case llvm::Triple::ptx64:
- return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
case llvm::Triple::mblaze:
return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
@@ -3653,8 +3811,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, true, true, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false,
+ CodeGenOpts.NumRegisterParameters));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3663,19 +3821,22 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::Bitrig:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, true, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
+ false,
+ CodeGenOpts.NumRegisterParameters));
case llvm::Triple::Win32:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, true, DisableMMX, true));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true,
+ CodeGenOpts.NumRegisterParameters));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, false, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
+ false,
+ CodeGenOpts.NumRegisterParameters));
}
}