Diffstat (limited to 'lib/CodeGen')
 lib/CodeGen/CGBlocks.cpp        |   2
 lib/CodeGen/CGCall.cpp          |  24
 lib/CodeGen/CGClass.cpp         |   4
 lib/CodeGen/CGDebugInfo.cpp     |   8
 lib/CodeGen/CGDecl.cpp          | 102
 lib/CodeGen/CGException.cpp     | 203
 lib/CodeGen/CGException.h       |  92
 lib/CodeGen/CGExpr.cpp          |   2
 lib/CodeGen/CGExprAgg.cpp       |   4
 lib/CodeGen/CGExprComplex.cpp   |  25
 lib/CodeGen/CGExprScalar.cpp    |   9
 lib/CodeGen/CGObjCGNU.cpp       |   3
 lib/CodeGen/CGObjCMac.cpp       |  70
 lib/CodeGen/CodeGenFunction.cpp | 177
 lib/CodeGen/CodeGenFunction.h   |  75
 lib/CodeGen/CodeGenModule.h     |   8
 lib/CodeGen/Mangle.cpp          | 253
 lib/CodeGen/TargetInfo.cpp      |   1
 18 files changed, 883 insertions(+), 179 deletions(-)
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 0d05a62..cb9e636 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -253,7 +253,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
FunctionType::ExtInfo());
- if (CGM.ReturnTypeUsesSret(FnInfo))
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
flags |= BLOCK_USE_STRET;
}
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 1632cb3..3d1e143 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -564,10 +564,28 @@ static void CreateCoercedStore(llvm::Value *Src,
/***/
-bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
return FI.getReturnInfo().isIndirect();
}
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getContext().Target.useObjCFPRetForRealType(
+ TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
+
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
@@ -841,7 +859,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function::arg_iterator AI = Fn->arg_begin();
// Name the struct return argument.
- if (CGM.ReturnTypeUsesSret(FI)) {
+ if (CGM.ReturnTypeUsesSRet(FI)) {
AI->setName("agg.result");
++AI;
}
@@ -1116,7 +1134,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
- if (CGM.ReturnTypeUsesSret(CallInfo)) {
+ if (CGM.ReturnTypeUsesSRet(CallInfo)) {
llvm::Value *Value = ReturnValue.getValue();
if (!Value)
Value = CreateMemTemp(RetTy);
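
[Note] The new ReturnTypeUsesFPRet hook mirrors ReturnTypeUsesSRet: it asks the target whether a floating-point return type must be fetched through the objc_msgSend_fpret entry point (in practice x86-32, where useObjCFPRetForRealType() is true because floats come back on the x87 stack). A sketch of the intended call-site pattern, simplified from the CGObjCMac.cpp hunks later in this patch:

    // Dispatch-function selection for a legacy ObjC message send (sketch;
    // the real code is CGObjCCommonMac::EmitLegacyMessageSend below, which
    // also picks the ABI-2 variants).
    llvm::Constant *Fn;
    if (CGM.ReturnTypeUsesSRet(FnInfo))            // aggregate returned indirectly
      Fn = ObjCTypes.getSendStretFn(IsSuper);      // objc_msgSend_stret
    else if (CGM.ReturnTypeUsesFPRet(ResultType))  // float/double/long double
      Fn = ObjCTypes.getSendFpretFn(IsSuper);      // objc_msgSend_fpret
    else
      Fn = ObjCTypes.getSendFn(IsSuper);           // objc_msgSend
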
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index df5ea18..c50fe90 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -340,7 +340,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::CleanupBlock Cleanup(CGF, CodeGenFunction::EHCleanup);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
CXXDestructorDecl *DD = BaseClassDecl->getDestructor();
CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
@@ -534,7 +534,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::CleanupBlock Cleanup(CGF, CodeGenFunction::EHCleanup);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index a3c4003..4e15895 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -537,11 +537,17 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIType ThisPtrType =
DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
- if (Method->getTypeQualifiers() && Qualifiers::Const)
+ unsigned Quals = Method->getTypeQualifiers();
+ if (Quals & Qualifiers::Const)
ThisPtrType =
DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_const_type,
Unit, "", Unit,
0, 0, 0, 0, 0, ThisPtrType);
+ if (Quals & Qualifiers::Volatile)
+ ThisPtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_volatile_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, ThisPtrType);
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
Elts.push_back(ThisPtrType);
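
[Note] The removed line tested Quals with `&&`, a logical AND that is true whenever the method has any qualifier at all, so const-ness was attached spuriously and volatile was never modeled. The fix masks each bit separately. A minimal illustration, assuming the usual non-zero Qualifiers bit values:

    unsigned Quals = Qualifiers::Volatile;      // a 'volatile' member function
    bool wrong = Quals && Qualifiers::Const;    // true: both operands are non-zero
    bool right = Quals &  Qualifiers::Const;    // false: the Const bit is not set
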
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 959a9ae..1a62ea9 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -388,6 +388,58 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
return Info.first;
}
+namespace {
+ struct CallArrayDtor : EHScopeStack::LazyCleanup {
+ CallArrayDtor(const CXXDestructorDecl *Dtor,
+ const ConstantArrayType *Type,
+ llvm::Value *Loc)
+ : Dtor(Dtor), Type(Type), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ const ConstantArrayType *Type;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
+ CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
+ }
+ };
+
+ struct CallVarDtor : EHScopeStack::LazyCleanup {
+ CallVarDtor(const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag,
+ llvm::Value *Loc)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = !IsForEH && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+}
+
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
@@ -686,53 +738,11 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
if (const ConstantArrayType *Array =
getContext().getAsConstantArrayType(Ty)) {
- CleanupBlock Scope(*this, NormalCleanup);
-
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
-
- if (Exceptions) {
- Scope.beginEHCleanup();
-
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
- }
+ EHStack.pushLazyCleanup<CallArrayDtor>(NormalAndEHCleanup,
+ D, Array, Loc);
} else {
- // Normal destruction.
- CleanupBlock Scope(*this, NormalCleanup);
-
- llvm::BasicBlock *SkipDtor = 0;
- if (NRVO) {
- // If we exited via NRVO, we skip the destructor call.
- llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
- SkipDtor = createBasicBlock("nrvo.skipdtor");
- Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
- SkipDtor,
- NoNRVO);
- EmitBlock(NoNRVO);
- }
-
- // We don't call the destructor along the normal edge if we're
- // applying the NRVO.
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
-
- if (NRVO) EmitBlock(SkipDtor);
-
- // Along the exceptions path we always execute the dtor.
- if (Exceptions) {
- Scope.beginEHCleanup();
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
- }
+ EHStack.pushLazyCleanup<CallVarDtor>(NormalAndEHCleanup,
+ D, NRVOFlag, Loc);
}
}
}
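
[Note] These two hunks are the first users of the lazy-cleanup machinery introduced in CGException.cpp/CodeGenFunction.cpp below: instead of emitting the destructor IR twice up front (once for the normal edge and once under Scope.beginEHCleanup(), as the deleted lines did), the code pushes a small POD-like object and the IR is generated later, once per edge that actually needs it. The general idiom, as a sketch with a hypothetical cleanup type (CallArrayDtor and CallVarDtor above are the real instances):

    namespace {
      // Hypothetical example of the new idiom: captured state is POD-like,
      // and Emit() is invoked lazily for each edge that requires it.
      struct CallSomeDtor : EHScopeStack::LazyCleanup {
        CallSomeDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
          : D(D), Addr(Addr) {}
        const CXXDestructorDecl *D;
        llvm::Value *Addr;
        void Emit(CodeGenFunction &CGF, bool IsForEH) {
          CGF.EmitCXXDestructorCall(D, Dtor_Complete,
                                    /*ForVirtualBase=*/false, Addr);
        }
      };
    }
    // Later, inside some CodeGenFunction member:
    EHStack.pushLazyCleanup<CallSomeDtor>(NormalAndEHCleanup, D, Addr);
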
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 085dddd..4980aad 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -62,12 +62,37 @@ EHScopeStack::getEnclosingEHCleanup(iterator it) const {
return stabilize(it);
return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
}
+ if (isa<EHLazyCleanupScope>(*it)) {
+ if (cast<EHLazyCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHLazyCleanupScope>(*it).getEnclosingEHCleanup();
+ }
++it;
} while (it != end());
return stable_end();
}
+void *EHScopeStack::pushLazyCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHLazyCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind != EHCleanup;
+ bool IsEHCleanup = Kind != NormalCleanup;
+ EHLazyCleanupScope *Scope =
+ new (Buffer) EHLazyCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHCleanup = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry,
llvm::BasicBlock *NormalExit,
llvm::BasicBlock *EHEntry,
@@ -86,11 +111,18 @@ void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry,
void EHScopeStack::popCleanup() {
assert(!empty() && "popping exception stack when not empty");
- assert(isa<EHCleanupScope>(*begin()));
- EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
- InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
- InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
- StartOfData += EHCleanupScope::getSize();
+ if (isa<EHLazyCleanupScope>(*begin())) {
+ EHLazyCleanupScope &Cleanup = cast<EHLazyCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+ } else {
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += EHCleanupScope::getSize();
+ }
// Check whether we can shrink the branch-fixups stack.
if (!BranchFixups.empty()) {
@@ -144,7 +176,11 @@ void EHScopeStack::popNullFixups() {
assert(hasNormalCleanups());
EHScopeStack::iterator it = find(InnermostNormalCleanup);
- unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ unsigned MinSize;
+ if (isa<EHCleanupScope>(*it))
+ MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ else
+ MinSize = cast<EHLazyCleanupScope>(*it).getFixupDepth();
assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
while (BranchFixups.size() > MinSize &&
@@ -364,6 +400,33 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
}
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeExceptionCleanup : EHScopeStack::LazyCleanup {
+ FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
+ llvm::Value *ExnLocVar)
+ : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
+
+ llvm::Value *ShouldFreeVar;
+ llvm::Value *ExnLocVar;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
+
+ llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
+ "should-free-exnobj");
+ CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
+ CGF.EmitBlock(FreeBB);
+ llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ ->setDoesNotThrow();
+ CGF.EmitBlock(DoneBB);
+ }
+ };
+}
+
// Emits an exception expression into the given location. This
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
@@ -388,22 +451,11 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
- // FIXME: StmtExprs probably force this to include a non-EH
- // handler.
- {
- CodeGenFunction::CleanupBlock Cleanup(CGF, CodeGenFunction::EHCleanup);
- llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
- llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
-
- llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
- "should-free-exnobj");
- CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
- CGF.EmitBlock(FreeBB);
- llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
- CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
- ->setDoesNotThrow();
- CGF.EmitBlock(DoneBB);
- }
+ // FIXME: stmt expressions might require this to be a normal
+ // cleanup, too.
+ CGF.EHStack.pushLazyCleanup<FreeExceptionCleanup>(EHCleanup,
+ ShouldFreeVar,
+ ExnLocVar);
EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
@@ -598,13 +650,28 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
/// affect exception handling. Currently, the only non-EH scopes are
/// normal-only cleanup scopes.
static bool isNonEHScope(const EHScope &S) {
- return isa<EHCleanupScope>(S) && !cast<EHCleanupScope>(S).isEHCleanup();
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::LazyCleanup:
+ return !cast<EHLazyCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ return false;
+ }
+
+ // Suppress warning.
+ return false;
}
llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
+ if (!Exceptions)
+ return 0;
+
// Check the innermost scope for a cached landing pad. If this is
// a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
@@ -713,6 +780,12 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
I != E; ++I) {
switch (I->getKind()) {
+ case EHScope::LazyCleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHLazyCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
case EHScope::Cleanup:
if (!HasEHCleanup)
HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
@@ -947,19 +1020,45 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
return LP;
}
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch : EHScopeStack::LazyCleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0);
+ }
+ };
+}
+
/// Emits a call to __cxa_begin_catch and enters a cleanup to call
/// __cxa_end_catch.
-static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn) {
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
Call->setDoesNotThrow();
- {
- CodeGenFunction::CleanupBlock EndCatchCleanup(CGF,
- CodeGenFunction::NormalAndEHCleanup);
-
- // __cxa_end_catch never throws, so this can just be a call.
- CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
- }
+ CGF.EHStack.pushLazyCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
return Call;
}
@@ -979,8 +1078,11 @@ static void InitCatchParam(CodeGenFunction &CGF,
// If we're catching by reference, we can just cast the object
// pointer to the appropriate pointer.
if (isa<ReferenceType>(CatchType)) {
+ bool EndCatchMightThrow = cast<ReferenceType>(CatchType)->getPointeeType()
+ ->isRecordType();
+
// __cxa_begin_catch returns the adjusted object pointer.
- llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn);
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
llvm::Value *ExnCast =
CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
CGF.Builder.CreateStore(ExnCast, ParamAddr);
@@ -991,7 +1093,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
bool IsComplex = false;
if (!CGF.hasAggregateLLVMType(CatchType) ||
(IsComplex = CatchType->isAnyComplexType())) {
- llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn);
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
// If the catch type is a pointer type, __cxa_begin_catch returns
// the pointer by value.
@@ -1026,7 +1128,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
if (RD->hasTrivialCopyConstructor()) {
- llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn);
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true);
llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
return;
@@ -1059,7 +1161,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.popTerminate();
// Finally we can call __cxa_begin_catch.
- CallBeginCatch(CGF, Exn);
+ CallBeginCatch(CGF, Exn, true);
}
/// Begins a catch statement by initializing the catch variable and
@@ -1092,7 +1194,7 @@ static void BeginCatch(CodeGenFunction &CGF,
VarDecl *CatchParam = S->getExceptionDecl();
if (!CatchParam) {
llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
- CallBeginCatch(CGF, Exn);
+ CallBeginCatch(CGF, Exn, true);
return;
}
@@ -1100,6 +1202,14 @@ static void BeginCatch(CodeGenFunction &CGF,
CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
}
+namespace {
+ struct CallRethrow : EHScopeStack::LazyCleanup {
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
+ }
+ };
+}
+
void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
unsigned NumHandlers = S.getNumHandlers();
EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
@@ -1140,12 +1250,10 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
BeginCatch(*this, C);
// If there's an implicit rethrow, push a normal "cleanup" to call
- // _cxa_rethrow. This needs to happen before _cxa_end_catch is
- // called.
- if (ImplicitRethrow) {
- CleanupBlock Rethrow(*this, NormalCleanup);
- EmitCallOrInvoke(getReThrowFn(*this), 0, 0);
- }
+ // __cxa_rethrow. This needs to happen before __cxa_end_catch is
+ // called, and so it is pushed after BeginCatch.
+ if (ImplicitRethrow)
+ EHStack.pushLazyCleanup<CallRethrow>(NormalCleanup);
// Perform the body of the catch.
EmitStmt(C->getHandlerBlock());
@@ -1213,13 +1321,11 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
// Enter a normal cleanup which will perform the @finally block.
{
- CodeGenFunction::CleanupBlock
- NormalCleanup(*this, CodeGenFunction::NormalCleanup);
+ CodeGenFunction::CleanupBlock Cleanup(*this, NormalCleanup);
// Enter a cleanup to call the end-catch function if one was provided.
if (EndCatchFn) {
- CodeGenFunction::CleanupBlock
- FinallyExitCleanup(CGF, CodeGenFunction::NormalAndEHCleanup);
+ CodeGenFunction::CleanupBlock FinallyExitCleanup(CGF, NormalAndEHCleanup);
llvm::BasicBlock *EndCatchBB = createBasicBlock("finally.endcatch");
llvm::BasicBlock *CleanupContBB = createBasicBlock("finally.cleanup.cont");
@@ -1228,7 +1334,7 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
Builder.CreateLoad(ForEHVar, "finally.endcatch");
Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
EmitBlock(EndCatchBB);
- Builder.CreateCall(EndCatchFn)->setDoesNotThrow();
+ EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
EmitBlock(CleanupContBB);
}
@@ -1435,3 +1541,6 @@ CodeGenFunction::CleanupBlock::~CleanupBlock() {
CGF.Builder.restoreIP(SavedIP);
}
+EHScopeStack::LazyCleanup::~LazyCleanup() {
+ llvm_unreachable("LazyCleanup is indestructable");
+}
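
[Note] The CallEndCatch comment block above encodes a small decision table; at the source level it works out as follows (Widget is a hypothetical class type, shown in one try block only for illustration):

    try { mayThrow(); }
    catch (int i)     { }  // non-record by value: end-catch cannot throw -> plain call
    catch (int &r)    { }  // reference to non-record: plain call
    catch (Widget &w) { }  // reference to record: might throw -> EmitCallOrInvoke
    catch (Widget w)  { }  // record by value: the actual exception may be a
                           // subclass with a throwing dtor -> EmitCallOrInvoke
    catch (...)       { }  // catch-all: nothing is known -> EmitCallOrInvoke
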
diff --git a/lib/CodeGen/CGException.h b/lib/CodeGen/CGException.h
index 8755dca..80739cd 100644
--- a/lib/CodeGen/CGException.h
+++ b/lib/CodeGen/CGException.h
@@ -31,13 +31,13 @@ namespace CodeGen {
class EHScope {
llvm::BasicBlock *CachedLandingPad;
- unsigned K : 2;
+ unsigned K : 3;
protected:
- enum { BitsRemaining = 30 };
+ enum { BitsRemaining = 29 };
public:
- enum Kind { Cleanup, Catch, Terminate, Filter };
+ enum Kind { Cleanup, LazyCleanup, Catch, Terminate, Filter };
EHScope(Kind K) : CachedLandingPad(0), K(K) {}
@@ -127,6 +127,87 @@ public:
}
};
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHLazyCleanupScope : public EHScope {
+ /// Whether this cleanup needs to be run along normal edges.
+ bool IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ bool IsEHCleanup : 1;
+
+ /// The amount of extra storage needed by the LazyCleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining - 14;
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// The dual entry/exit block along the EH edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *EHBlock;
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHLazyCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHLazyCleanupScope) + CleanupSize;
+ }
+
+ EHLazyCleanupScope(bool IsNormal, bool IsEH, unsigned CleanupSize,
+ unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::LazyCleanup),
+ IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
+ CleanupSize(CleanupSize), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalBlock(0), EHBlock(0)
+ {}
+
+ bool isNormalCleanup() const { return IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return EHBlock; }
+ void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ size_t getCleanupSize() const { return CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::LazyCleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::LazyCleanup*>(getCleanupBuffer());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == LazyCleanup);
+ }
+};
+
/// A scope which needs to execute some code if we try to unwind ---
/// either normally, via the EH mechanism, or both --- through it.
class EHCleanupScope : public EHScope {
@@ -267,6 +348,11 @@ public:
static_cast<const EHFilterScope*>(get())->getNumFilters());
break;
+ case EHScope::LazyCleanup:
+ Ptr += static_cast<const EHLazyCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
case EHScope::Cleanup:
Ptr += EHCleanupScope::getSize();
break;
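
[Note] EHLazyCleanupScope is a variable-length record: the scope header and the user's LazyCleanup object share one allocation on the EHScopeStack buffer, which is why pushLazyCleanup in CGException.cpp returns Scope->getCleanupBuffer() for the caller to placement-new into. A sketch of the layout and allocation:

    // One lazy-cleanup entry inside the EHScopeStack buffer:
    //
    //   +---------------------------+--------------------------------+
    //   | EHLazyCleanupScope header | cleanup object (CleanupSize)   |
    //   +---------------------------+--------------------------------+
    //   ^ this                       ^ getCleanupBuffer() == this + 1
    //
    // CleanupSize is asserted to be a multiple of sizeof(void*) in
    // pushLazyCleanup, keeping the trailing object pointer-aligned.
    char *Buffer = allocate(EHLazyCleanupScope::getSizeForCleanupSize(Size));
    EHLazyCleanupScope *Scope = new (Buffer) EHLazyCleanupScope(/* ... */);
    void *ObjBuffer = Scope->getCleanupBuffer(); // placement-new the cleanup here
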
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 0426a60..43bab9f 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -1816,7 +1816,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
}
- case CastExpr::CK_BitCast: {
+ case CastExpr::CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 20722f7..219a5f9 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -307,6 +307,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
+ case CastExpr::CK_LValueBitCast:
+ llvm_unreachable("there are no lvalue bit-casts on aggregates");
+ break;
+
case CastExpr::CK_BitCast: {
// This must be a member function pointer cast.
Visit(E->getSubExpr());
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 90b6446..0927319 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -131,14 +131,14 @@ public:
// FIXME: CompoundLiteralExpr
- ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+ ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
- return EmitCast(E->getSubExpr(), E->getType());
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCastExpr(CastExpr *E) {
- return EmitCast(E->getSubExpr(), E->getType());
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCallExpr(const CallExpr *E);
ComplexPairTy VisitStmtExpr(const StmtExpr *E);
@@ -339,11 +339,22 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
return Val;
}
-ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ QualType DestTy) {
// Two cases here: cast from (complex to complex) and (scalar to complex).
if (Op->getType()->isAnyComplexType())
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ // FIXME: We should be looking at all of the cast kinds here, not
+ // cherry-picking the ones we have test cases for.
+ if (CK == CastExpr::CK_LValueBitCast) {
+ llvm::Value *V = CGF.EmitLValue(Op).getAddress();
+ V = Builder.CreateBitCast(V,
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
+ }
+
// C99 6.3.1.7: When a value of real type is converted to a complex type, the
// real part of the complex result value is determined by the rules of
// conversion to the corresponding real type and the imaginary part of the
@@ -521,7 +532,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// improve codegen a little. It is possible for the RHS to be complex or
// scalar.
OpInfo.Ty = E->getComputationResultType();
- OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+ OpInfo.RHS = EmitCast(CastExpr::CK_Unknown, E->getRHS(), OpInfo.Ty);
LValue LHS = CGF.EmitLValue(E->getLHS());
// We know the LHS is a complex lvalue.
@@ -572,8 +583,8 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
TestAndClearIgnoreImag();
bool ignreal = TestAndClearIgnoreRealAssign();
bool ignimag = TestAndClearIgnoreImagAssign();
- assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
- CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
"Invalid assignment");
// Emit the RHS.
ComplexPairTy Val = Visit(E->getRHS());
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 1ebc2c5..ef38209 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -925,6 +925,15 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
//assert(0 && "Unknown cast kind!");
break;
+ case CastExpr::CK_LValueBitCast: {
+ Value *V = EmitLValue(E).getAddress();
+ V = Builder.CreateBitCast(V,
+ ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfLValue(LValue::MakeAddr(V, CGF.MakeQualifiers(DestTy)),
+ DestTy);
+ }
+
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
case CastExpr::CK_BitCast: {
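
[Note] CK_LValueBitCast is split out of CK_BitCast for reinterpret_casts that reinterpret an lvalue rather than convert a value: codegen takes the address, bitcasts the pointer, and loads through it, as the complex and scalar emitters above now do. Source forms that should exercise these paths (a sketch; the complex case relies on the GNU _Complex extension):

    float f = 1.0f;
    int   i = reinterpret_cast<int&>(f);  // scalar path: &f -> (int*) -> load

    double parts[2] = {1.0, 2.0};
    _Complex double c =
        reinterpret_cast<_Complex double&>(parts);  // complex path above
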
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index c9da348..f3c80bc 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -1871,8 +1871,7 @@ void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Register an all-paths cleanup to release the lock.
{
- CodeGenFunction::CleanupBlock
- ReleaseScope(CGF, CodeGenFunction::NormalAndEHCleanup);
+ CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 0a766d5..01ead9e 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -32,6 +32,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <cstdio>
@@ -1649,29 +1650,17 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
"Result type mismatch!");
llvm::Constant *Fn = NULL;
- if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
- } else if (ResultType->isRealFloatingType()) {
- if (ObjCABI == 2) {
- if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
- BuiltinType::Kind k = BT->getKind();
- Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
- : ObjCTypes.getSendFn2(IsSuper);
- } else {
- Fn = ObjCTypes.getSendFn2(IsSuper);
- }
- } else
- // FIXME. This currently matches gcc's API for x86-32. May need to change
- // for others if we have their API.
- Fn = ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
} else {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
: ObjCTypes.getSendFn(IsSuper);
}
- assert(Fn && "EmitLegacyMessageSend - unknown API");
- Fn = llvm::ConstantExpr::getBitCast(Fn,
- llvm::PointerType::getUnqual(FTy));
+ Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
}
@@ -2697,8 +2686,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Push a normal cleanup to leave the try scope.
{
- CodeGenFunction::CleanupBlock
- FinallyScope(CGF, CodeGenFunction::NormalCleanup);
+ CodeGenFunction::CleanupBlock FinallyScope(CGF, NormalCleanup);
// Check whether we need to call objc_exception_try_exit.
// In optimized code, this branch will always be folded.
@@ -5295,7 +5283,7 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
FunctionType::ExtInfo());
llvm::Constant *Fn = 0;
std::string Name("\01l_");
- if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
#if 0
// unlike what is documented. gcc never generates this API!!
if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
@@ -5312,14 +5300,9 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
Fn = ObjCTypes.getMessageSendStretFixupFn();
Name += "objc_msgSend_stret_fixup";
}
- } else if (!IsSuper && ResultType->isRealFloatingType()) {
- if (ResultType->isSpecificBuiltinType(BuiltinType::LongDouble)) {
- Fn = ObjCTypes.getMessageSendFpretFixupFn();
- Name += "objc_msgSend_fpret_fixup";
- } else {
- Fn = ObjCTypes.getMessageSendFixupFn();
- Name += "objc_msgSend_fixup";
- }
+ } else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
} else {
#if 0
// unlike what is documented. gcc never generates this API!!
@@ -5693,8 +5676,7 @@ CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Register an all-paths cleanup to release the lock.
{
- CodeGenFunction::CleanupBlock
- ReleaseScope(CGF, CodeGenFunction::NormalAndEHCleanup);
+ CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
->setDoesNotThrow();
@@ -5714,6 +5696,22 @@ namespace {
llvm::BasicBlock *Block;
llvm::Value *TypeInfo;
};
+
+ struct CallObjCEndCatch : EHScopeStack::LazyCleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn, 0, 0);
+ }
+ };
}
void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -5803,14 +5801,10 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
Exn->setDoesNotThrow();
// Add a cleanup to leave the catch.
- {
- CodeGenFunction::CleanupBlock
- EndCatchBlock(CGF, CodeGenFunction::NormalAndEHCleanup);
-
- // __objc_end_catch never throws.
- CGF.Builder.CreateCall(ObjCTypes.getObjCEndCatchFn())
- ->setDoesNotThrow();
- }
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+ CGF.EHStack.pushLazyCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ ObjCTypes.getObjCEndCatchFn());
// Bind the catch parameter if it exists.
if (const VarDecl *CatchParam = Handler.Variable) {
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 5e505c2..eb6c436 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -825,11 +825,168 @@ static void SimplifyCleanupEdges(CodeGenFunction &CGF,
SimplifyCleanupEntry(CGF, Entry);
}
+static void EmitLazyCleanup(CodeGenFunction &CGF,
+ EHScopeStack::LazyCleanup *Fn,
+ bool ForEH) {
+ if (ForEH) CGF.EHStack.pushTerminate();
+ Fn->Emit(CGF, ForEH);
+ if (ForEH) CGF.EHStack.popTerminate();
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+}
+
+static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
+ EHScopeStack::LazyCleanup *Fn,
+ bool ForEH,
+ llvm::BasicBlock *Entry) {
+ assert(Entry && "no entry block for cleanup");
+
+ // Remove the switch and load from the end of the entry block.
+ llvm::Instruction *Switch = &Entry->getInstList().back();
+ Entry->getInstList().remove(Switch);
+ assert(isa<llvm::SwitchInst>(Switch));
+ llvm::Instruction *Load = &Entry->getInstList().back();
+ Entry->getInstList().remove(Load);
+ assert(isa<llvm::LoadInst>(Load));
+
+ assert(Entry->getInstList().empty() &&
+ "lazy cleanup block not empty after removing load/switch pair?");
+
+ // Emit the actual cleanup at the end of the entry block.
+ CGF.Builder.SetInsertPoint(Entry);
+ EmitLazyCleanup(CGF, Fn, ForEH);
+
+ // Put the load and switch at the end of the exit block.
+ llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
+ Exit->getInstList().push_back(Load);
+ Exit->getInstList().push_back(Switch);
+
+ // Clean up the edges if possible.
+ SimplifyCleanupEdges(CGF, Entry, Exit);
+
+ CGF.Builder.ClearInsertionPoint();
+}
+
+static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
+ assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
+ assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ llvm::BasicBlock *EHEntry = Scope.getEHBlock();
+ bool RequiresEHCleanup = (EHEntry != 0);
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether control has already been threaded through this cleanup
+ llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
+ bool HasExistingBranches = (NormalEntry != 0);
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0);
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ CGF.EHStack.popCleanup();
+ assert(CGF.EHStack.getNumBranchFixups() == 0 ||
+ CGF.EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::LazyCleanup *Fn =
+ reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());
+
+ // We're done with the scope; pop it off so we can emit the cleanups.
+ CGF.EHStack.popCleanup();
+
+ if (RequiresNormalCleanup) {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasFixups && !HasExistingBranches) {
+ EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ if (!HasExistingBranches) {
+ NormalEntry = CGF.createBasicBlock("cleanup");
+ CreateCleanupSwitch(CGF, NormalEntry);
+ }
+
+ CGF.EmitBlock(NormalEntry);
+
+ // Thread the fallthrough edge through the (momentarily trivial)
+ // cleanup.
+ llvm::BasicBlock *FallthroughDestination = 0;
+ if (HasFallthrough) {
+ assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
+ FallthroughDestination = CGF.createBasicBlock("cleanup.cont");
+
+ BranchFixup Fix;
+ Fix.Destination = FallthroughDestination;
+ Fix.LatestBranch = FallthroughSource->getTerminator();
+ Fix.LatestBranchIndex = 0;
+ Fix.Origin = Fix.LatestBranch;
+
+ // Restore fixup invariant. EmitBlock added a branch to the
+ // cleanup which we need to redirect to the destination.
+ cast<llvm::BranchInst>(Fix.LatestBranch)
+ ->setSuccessor(0, Fix.Destination);
+
+ ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
+ }
+
+ // Thread any "real" fixups we need to thread.
+ for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
+ I != E; ++I)
+ if (CGF.EHStack.getBranchFixup(I).Destination)
+ ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
+ NormalEntry, NormalEntry);
+
+ SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);
+
+ if (HasFallthrough)
+ CGF.EmitBlock(FallthroughDestination);
+ }
+ }
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(EHEntry);
+ SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
+ CGF.Builder.restoreIP(SavedIP);
+ }
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock() {
assert(!EHStack.empty() && "cleanup stack is empty!");
+ if (isa<EHLazyCleanupScope>(*EHStack.begin()))
+ return PopLazyCleanupBlock(*this);
+
assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
@@ -1007,6 +1164,16 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
if (Scope.isNormalCleanup())
ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
Scope.getNormalExit());
+ } else if (isa<EHLazyCleanupScope>(*I)) {
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
+ if (Scope.isNormalCleanup()) {
+ llvm::BasicBlock *Block = Scope.getNormalBlock();
+ if (!Block) {
+ Block = createBasicBlock("cleanup");
+ Scope.setNormalBlock(Block);
+ }
+ ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ }
}
}
@@ -1046,6 +1213,16 @@ void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
if (Scope.isEHCleanup())
ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
Scope.getEHExit());
+ } else if (isa<EHLazyCleanupScope>(*I)) {
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
+ if (Scope.isEHCleanup()) {
+ llvm::BasicBlock *Block = Scope.getEHBlock();
+ if (!Block) {
+ Block = createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Block);
+ }
+ ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ }
}
}
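
[Note] PopLazyCleanupBlock distinguishes a pure-fallthrough cleanup, which can be emitted inline at the current insertion point, from one that other edges (branch fixups, pre-threaded branches, an EH edge) pass through, which needs the load/switch machinery in SplitAndEmitLazyCleanup. At the source level (a sketch, assuming a hypothetical NonTrivial class and -fno-exceptions so no EH edge exists):

    void fast(void) {
      NonTrivial x;     // push lazy cleanup
      x.use();
    }                   // fallthrough only: dtor emitted straight-line

    void slow(bool b) {
      NonTrivial x;
      if (b) return;    // early exit: a branch fixup through the cleanup
    }                   // fallthrough + fixup: cleanup block and switch emitted
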
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 26fb882..5ee3db0 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -95,6 +95,8 @@ struct BranchFixup {
unsigned LatestBranchIndex;
};
+enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup };
+
/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
@@ -123,6 +125,33 @@ public:
}
};
+ /// A lazy cleanup. Subclasses must be POD-like: cleanups will
+ /// not be destructed, and they will be allocated on the cleanup
+ /// stack and freely copied and moved around.
+ ///
+ /// LazyCleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class LazyCleanup {
+ public:
+ // Anchor the construction vtable. We use the destructor because
+ // gcc gives an obnoxious warning if there are virtual methods
+ // with an accessible non-virtual destructor. Unfortunately,
+ // declaring this destructor makes it non-trivial, but there
+ // doesn't seem to be any other way around this warning.
+ //
+ // This destructor will never be called.
+ virtual ~LazyCleanup();
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ /// \param IsForEHCleanup true if this is for an EH cleanup, false
+ /// if for a normal cleanup.
+ virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
+ };
+
private:
// The implementation for this class is in CGException.h and
// CGException.cpp; the definition is here because it's used as a
@@ -171,6 +200,8 @@ private:
void popNullFixups();
+ void *pushLazyCleanup(CleanupKind K, size_t DataSize);
+
public:
EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
InnermostNormalCleanup(stable_end()),
@@ -178,6 +209,48 @@ public:
CatchDepth(0) {}
~EHScopeStack() { delete[] StartOfBuffer; }
+ // Variadic templates would make this not terrible.
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T>
+ void pushLazyCleanup(CleanupKind Kind) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T();
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1, a2);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ (void) Obj;
+ }
+
/// Push a cleanup on the stack.
void pushCleanup(llvm::BasicBlock *NormalEntry,
llvm::BasicBlock *NormalExit,
@@ -375,8 +448,6 @@ public:
llvm::Constant *RethrowFn);
void ExitFinallyBlock(FinallyInfo &FinallyInfo);
- enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup };
-
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
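
[Note] The pushLazyCleanup overloads are a pre-C++11 substitute for variadic templates (hence the "Variadic templates would make this not terrible" comment): each arity forwards its arguments to T's constructor, placement-new'd into the buffer returned by the private pushLazyCleanup(Kind, size_t). Usage, taken from the CGException.cpp changes above:

    // T = FreeExceptionCleanup; the trailing arguments go to its constructor.
    CGF.EHStack.pushLazyCleanup<FreeExceptionCleanup>(EHCleanup,
                                                      ShouldFreeVar, ExnLocVar);
    // Because ~LazyCleanup is never run, T must be POD-like: trivially
    // copyable and owning no resources.
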
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index a1ea0ec..27f15fc 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -444,9 +444,13 @@ public:
/// which only apply to a function definition.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
- /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+ /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used
/// as a return type.
- bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+ /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when used
+ /// as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
/// ConstructAttributeList - Get the LLVM attributes and calling convention to
/// use for a particular function type.
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 2ae1919..30ee541 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -169,7 +169,9 @@ public:
void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleName(const NamedDecl *ND);
void mangleType(QualType T);
@@ -230,6 +232,7 @@ private:
#include "clang/AST/TypeNodes.def"
void mangleType(const TagType*);
+ void mangleType(TemplateName);
void mangleBareFunctionType(const FunctionType *T,
bool MangleReturnType);
@@ -526,6 +529,21 @@ void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
addSubstitution(Template);
}
+void CXXNameMangler::mangleFloat(const llvm::APFloat &F) {
+ // TODO: avoid this copy with careful stream management.
+ llvm::SmallString<20> Buffer;
+ F.bitcastToAPInt().toString(Buffer, 16, false);
+ Out.write(Buffer.data(), Buffer.size());
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, true);
+ } else
+ Value.print(Out, Value.isSigned());
+}
+
void CXXNameMangler::mangleNumber(int64_t Number) {
// <number> ::= [n] <non-negative decimal integer>
if (Number < 0) {
@@ -939,6 +957,53 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
addSubstitution(ND);
}
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = 0;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+ break;
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ }
+
+ addSubstitution(TN);
+}
+
void
CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
switch (OO) {
@@ -1301,8 +1366,6 @@ void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
mangleTemplateParameter(T->getIndex());
}
-// FIXME: <type> ::= <template-template-param> <template-args>
-
// <type> ::= P <type> # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
Out << 'P';
@@ -1467,11 +1530,7 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
// Boolean values are encoded as 0/1.
Out << (Value.getBoolValue() ? '1' : '0');
} else {
- if (Value.isSigned() && Value.isNegative()) {
- Out << 'n';
- Value.abs().print(Out, true);
- } else
- Value.print(Out, Value.isSigned());
+ mangleNumber(Value);
}
Out << 'E';
@@ -1535,10 +1594,44 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
#define STMT(Type, Base) \
case Expr::Type##Class:
#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::BlockDeclRefExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::InitListExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::CXXScalarValueInitExprClass:
llvm_unreachable("unexpected statement kind");
break;
- default: {
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCSuperExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::TypesCompatibleExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::VAArgExprClass: {
// As bad as this diagnostic is, it's better than crashing.
Diagnostic &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
@@ -1550,6 +1643,11 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr());
+ break;
+
+ case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
Out << "cl";
@@ -1560,6 +1658,26 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXNewExprClass: {
+ // Proposal from David Vandevoorde, 2010.06.30
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ Out << "pi";
+ for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(),
+ E = New->constructor_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ }
+ Out << 'E';
+ break;
+ }
+
case Expr::MemberExprClass: {
const MemberExpr *ME = cast<MemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
@@ -1633,6 +1751,43 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+
+ // Proposal from David Vandevoorde, 2010.06.30
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+
+ // Proposal from David Vandevoorde, 2010.06.30
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand());
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+
+ // Proposal from David Vandevoorde, 2010.06.30
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
case Expr::UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(E);
mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
@@ -1641,6 +1796,18 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+ // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(E);
mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
@@ -1757,12 +1924,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const FloatingLiteral *FL = cast<FloatingLiteral>(E);
Out << 'L';
mangleType(FL->getType());
-
- // TODO: avoid this copy with careful stream management.
- llvm::SmallString<20> Buffer;
- FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
- Out.write(Buffer.data(), Buffer.size());
-
+ mangleFloat(FL->getValue());
Out << 'E';
break;
}
@@ -1780,16 +1942,62 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
Out << 'E';
break;
- case Expr::IntegerLiteralClass:
- mangleIntegerLiteral(E->getType(),
- llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
break;
+ }
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+ // Proposal from David Vandevoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << '0' << '_';
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::StringLiteralClass: {
+ // Proposal from David Vandevoorde, 2010.06.30.
+ // I've sent a comment off asking whether this needs to also
+ // represent the length of the string.
+ Out << 'L';
+ const ConstantArrayType *T = cast<ConstantArrayType>(E->getType());
+ QualType CharTy = T->getElementType().getUnqualifiedType();
+ mangleType(CharTy);
+ Out << 'E';
+ break;
}
-}
-// FIXME: <type> ::= G <type> # imaginary (C 2000)
-// FIXME: <type> ::= U <source-name> <type> # vendor extended type qualifier
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+ // Proposal from David Vandevoorde, 2010.06.30, as
+ // modified by ABI list discussion.
+ Out << "LDnE";
+ break;
+ }
+
+ }
+}
void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
// <ctor-dtor-name> ::= C1 # complete object constructor
@@ -1874,9 +2082,8 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
mangleType(A.getAsType());
break;
case TemplateArgument::Template:
- assert(A.getAsTemplate().getAsTemplateDecl() &&
- "Can't get dependent template names here");
- mangleName(A.getAsTemplate().getAsTemplateDecl());
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
break;
case TemplateArgument::Expression:
Out << 'X';
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 73530d4..c65f203 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -2365,7 +2365,6 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
- case llvm::Triple::Minix:
return *(TheTargetCodeGenInfo =
new X86_32TargetCodeGenInfo(Context, false, true));