| author | rdivacky <rdivacky@FreeBSD.org> | 2009-12-01 11:08:04 +0000 |
|---|---|---|
| committer | rdivacky <rdivacky@FreeBSD.org> | 2009-12-01 11:08:04 +0000 |
| commit | 4b08eb6308ca90a6c08e2fc79d100821b1b1f6aa (patch) | |
| tree | 867cbbe32a66fd7d62dd9ce9df23a23fefdb8290 /lib/CodeGen | |
| parent | 6df2408694f81a03eb8b0e3b013272042233c061 (diff) | |
Update clang to r90226.
Diffstat (limited to 'lib/CodeGen')
33 files changed, 2325 insertions, 1389 deletions
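One recurring mechanical change in the diff below is dropping the boolean volatile argument from `Builder.CreateLoad`/`Builder.CreateStore` and, where volatility actually matters, setting it explicitly on the returned instruction (see the hunks in `EmitBuiltinExpr` and `EmitLoadOfScalar`). The following is a minimal standalone mock of that call-site pattern; the types here are illustrative stand-ins, not the real LLVM `IRBuilder` API:

```cpp
// Mock of the "create first, mark volatile afterwards" pattern the diff adopts.
#include <iostream>
#include <memory>
#include <string>

struct StoreInst {                  // stand-in for llvm::StoreInst
  std::string Name;
  bool Volatile = false;
  void setVolatile(bool V) { Volatile = V; }
};

struct Builder {                    // stand-in for the CodeGen IR builder
  // New-style call site: no trailing bool-volatile parameter.
  std::unique_ptr<StoreInst> CreateStore(const std::string &Name) {
    auto I = std::make_unique<StoreInst>();
    I->Name = Name;
    return I;                       // the caller decides about volatility
  }
};

int main() {
  Builder B;
  auto Store = B.CreateStore("exception.clear");
  Store->setVolatile(true);         // explicit, only where it is required
  std::cout << Store->Name << " volatile=" << std::boolalpha
            << Store->Volatile << '\n';
}
```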
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp index bc9eb67..2df779c 100644 --- a/lib/CodeGen/CGBlocks.cpp +++ b/lib/CodeGen/CGBlocks.cpp @@ -291,14 +291,14 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0); llvm::Value *Loc = LocalDeclMap[VD]; Loc = Builder.CreateStructGEP(Loc, 1, "forwarding"); - Loc = Builder.CreateLoad(Loc, false); + Loc = Builder.CreateLoad(Loc); Builder.CreateStore(Loc, Addr); ++helpersize; continue; } else E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD), - VD->getType(), SourceLocation(), - false, false); + VD->getType(), + SourceLocation()); } if (BDRE->isByRef()) { NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF | @@ -331,7 +331,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { "block.literal"); Ty = llvm::PointerType::get(Ty, 0); Loc = Builder.CreateBitCast(Loc, Ty); - Loc = Builder.CreateLoad(Loc, false); + Loc = Builder.CreateLoad(Loc); // Loc = Builder.CreateBitCast(Loc, Ty); } Builder.CreateStore(Loc, Addr); @@ -494,7 +494,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) { E->arg_begin(), E->arg_end()); // Load the function. - llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp"); + llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp"); QualType ResultType = FnType->getAs<FunctionType>()->getResultType(); @@ -551,9 +551,9 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) { const llvm::Type *Ty = PtrStructTy; Ty = llvm::PointerType::get(Ty, 0); V = Builder.CreateBitCast(V, Ty); - V = Builder.CreateLoad(V, false); + V = Builder.CreateLoad(V); V = Builder.CreateStructGEP(V, 1, "forwarding"); - V = Builder.CreateLoad(V, false); + V = Builder.CreateLoad(V); V = Builder.CreateBitCast(V, PtrStructTy); V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD), VD->getNameAsString()); @@ -836,7 +836,7 @@ uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) { 0, QualType(PadTy), 0, VarDecl::None); Expr *E; E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(), - SourceLocation(), false, false); + SourceLocation()); BlockDeclRefDecls.push_back(E); } BlockDeclRefDecls.push_back(BDRE); diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 399b873..be4c27c 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -530,7 +530,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *Ptr = EmitScalarExpr(E->getArg(0)); const llvm::Type *ElTy = cast<llvm::PointerType>(Ptr->getType())->getElementType(); - Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr, true); + llvm::StoreInst *Store = + Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr); + Store->setVolatile(true); return RValue::get(0); } @@ -813,6 +815,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); return Builder.CreateStore(Ops[1], Ops[0]); } + case X86::BI__builtin_ia32_palignr128: + case X86::BI__builtin_ia32_palignr: { + Function *F = CGM.getIntrinsic(BuiltinID == X86::BI__builtin_ia32_palignr128 ? 
+ Intrinsic::x86_ssse3_palign_r_128 : + Intrinsic::x86_ssse3_palign_r); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size()); + } } } diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp index b1d30a6..34d1c8d 100644 --- a/lib/CodeGen/CGCXX.cpp +++ b/lib/CodeGen/CGCXX.cpp @@ -151,8 +151,7 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, "thread safe statics are currently not supported!"); llvm::SmallString<256> GuardVName; - llvm::raw_svector_ostream GuardVOut(GuardVName); - mangleGuardVariable(CGM.getMangleContext(), &D, GuardVOut); + CGM.getMangleContext().mangleGuardVariable(&D, GuardVName); // Create the guard variable. llvm::GlobalValue *GuardV = @@ -197,11 +196,6 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD, assert(MD->isInstance() && "Trying to emit a member call expr on a static method!"); - // A call to a trivial destructor requires no code generation. - if (const CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(MD)) - if (Destructor->isTrivial()) - return RValue::get(0); - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); CallArgList Args; @@ -275,6 +269,14 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) { This = BaseLV.getAddress(); } + if (MD->isCopyAssignment() && MD->isTrivial()) { + // We don't like to generate the trivial copy assignment operator when + // it isn't necessary; just produce the proper effect here. + llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress(); + EmitAggregateCopy(This, RHS, CE->getType()); + return RValue::get(This); + } + // C++ [class.virtual]p12: // Explicit qualification with the scope operator (5.1) suppresses the // virtual call mechanism. @@ -284,6 +286,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) { llvm::Value *Callee; if (const CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(MD)) { + if (Destructor->isTrivial()) + return RValue::get(0); if (MD->isVirtual() && !ME->hasQualifier() && !canDevirtualizeMemberFunctionCalls(ME->getBase())) { Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty); @@ -464,26 +468,31 @@ llvm::Value *CodeGenFunction::LoadCXXThis() { /// It is assumed that all relevant checks have been made by the caller. void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, - const ConstantArrayType *ArrayTy, - llvm::Value *ArrayPtr) { + const ConstantArrayType *ArrayTy, + llvm::Value *ArrayPtr, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd) { + const llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); llvm::Value * NumElements = llvm::ConstantInt::get(SizeTy, getContext().getConstantArrayElementCount(ArrayTy)); - EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr); + EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd); } void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, - llvm::Value *NumElements, - llvm::Value *ArrayPtr) { + llvm::Value *NumElements, + llvm::Value *ArrayPtr, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd) { const llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); // Create a temporary for the loop index and initialize it with 0. 
llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index"); llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy); - Builder.CreateStore(Zero, IndexPtr, false); + Builder.CreateStore(Zero, IndexPtr); // Start the loop with a block that tests the condition. llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); @@ -507,15 +516,31 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, Counter = Builder.CreateLoad(IndexPtr); llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter, "arrayidx"); - EmitCXXConstructorCall(D, Ctor_Complete, Address, 0, 0); + // C++ [class.temporary]p4: + // There are two contexts in which temporaries are destroyed at a different + // point than the end of the full- expression. The first context is when a + // default constructor is called to initialize an element of an array. + // If the constructor has one or more default arguments, the destruction of + // every temporary created in a default argument expression is sequenced + // before the construction of the next array element, if any. + + // Keep track of the current number of live temporaries. + unsigned OldNumLiveTemporaries = LiveTemporaries.size(); + + EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd); + + // Pop temporaries. + while (LiveTemporaries.size() > OldNumLiveTemporaries) + PopCXXTemporary(); + EmitBlock(ContinueBlock); // Emit the increment of the loop counter. llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1); Counter = Builder.CreateLoad(IndexPtr); NextVal = Builder.CreateAdd(Counter, NextVal, "inc"); - Builder.CreateStore(NextVal, IndexPtr, false); + Builder.CreateStore(NextVal, IndexPtr); // Finally, branch back up to the condition for the next iteration. EmitBranch(CondBlock); @@ -551,7 +576,7 @@ CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D, llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext), "loop.index"); // Index = ElementCount; - Builder.CreateStore(UpperCount, IndexPtr, false); + Builder.CreateStore(UpperCount, IndexPtr); // Start the loop with a block that tests the condition. llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); @@ -578,23 +603,14 @@ CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D, Counter = Builder.CreateLoad(IndexPtr); Counter = Builder.CreateSub(Counter, One); llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx"); - if (D->isVirtual()) { - const llvm::Type *Ty = - CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(D), - /*isVariadic=*/false); - - llvm::Value *Callee = BuildVirtualCall(D, Dtor_Deleting, Address, Ty); - EmitCXXMemberCall(D, Callee, Address, 0, 0); - } - else - EmitCXXDestructorCall(D, Dtor_Complete, Address); + EmitCXXDestructorCall(D, Dtor_Complete, Address); EmitBlock(ContinueBlock); // Emit the decrement of the loop counter. Counter = Builder.CreateLoad(IndexPtr); Counter = Builder.CreateSub(Counter, One, "dec"); - Builder.CreateStore(Counter, IndexPtr, false); + Builder.CreateStore(Counter, IndexPtr); // Finally, branch back up to the condition for the next iteration. EmitBranch(CondBlock); @@ -664,6 +680,10 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, EmitAggregateCopy(This, Src, Ty); return; } + } else if (D->isTrivial()) { + // FIXME: Track down why we're trying to generate calls to the trivial + // default constructor! 
+ return; } llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type); @@ -674,6 +694,15 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, llvm::Value *This) { + if (D->isVirtual()) { + const llvm::Type *Ty = + CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(D), + /*isVariadic=*/false); + + llvm::Value *Callee = BuildVirtualCall(D, Dtor_Deleting, This, Ty); + EmitCXXMemberCall(D, Callee, This, 0, 0); + return; + } llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(D, Type); EmitCXXMemberCall(D, Callee, This, 0, 0); @@ -715,7 +744,8 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest, BasePtr = llvm::PointerType::getUnqual(BasePtr); llvm::Value *BaseAddrPtr = Builder.CreateBitCast(Dest, BasePtr); - EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr); + EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr, + E->arg_begin(), E->arg_end()); } else // Call the constructor. @@ -744,7 +774,7 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type) { const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>(); const llvm::FunctionType *FTy = - getTypes().GetFunctionType(getTypes().getFunctionInfo(D), + getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), FPT->isVariadic()); const char *Name = getMangledCXXCtorName(D, Type); @@ -755,8 +785,7 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D, const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D, CXXCtorType Type) { llvm::SmallString<256> Name; - llvm::raw_svector_ostream Out(Name); - mangleCXXCtor(getMangleContext(), D, Type, Out); + getMangleContext().mangleCXXCtor(D, Type, Name); Name += '\0'; return UniqueMangledName(Name.begin(), Name.end()); @@ -764,9 +793,9 @@ const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D, void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) { if (D->isVirtual()) - EmitCXXDestructor(D, Dtor_Deleting); - EmitCXXDestructor(D, Dtor_Complete); - EmitCXXDestructor(D, Dtor_Base); + EmitGlobalDefinition(GlobalDecl(D, Dtor_Deleting)); + EmitGlobalDefinition(GlobalDecl(D, Dtor_Complete)); + EmitGlobalDefinition(GlobalDecl(D, Dtor_Base)); } void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D, @@ -783,7 +812,7 @@ llvm::Function * CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type) { const llvm::FunctionType *FTy = - getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false); + getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false); const char *Name = getMangledCXXDtorName(D, Type); return cast<llvm::Function>( @@ -793,59 +822,61 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D, const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D, CXXDtorType Type) { llvm::SmallString<256> Name; - llvm::raw_svector_ostream Out(Name); - mangleCXXDtor(getMangleContext(), D, Type, Out); + getMangleContext().mangleCXXDtor(D, Type, Name); Name += '\0'; return UniqueMangledName(Name.begin(), Name.end()); } -llvm::Constant *CodeGenFunction::GenerateThunk(llvm::Function *Fn, - const CXXMethodDecl *MD, - bool Extern, int64_t nv, - int64_t v) { - return GenerateCovariantThunk(Fn, MD, Extern, nv, v, 0, 0); +llvm::Constant * +CodeGenFunction::GenerateThunk(llvm::Function *Fn, const CXXMethodDecl *MD, + bool Extern, + const ThunkAdjustment &ThisAdjustment) { + return GenerateCovariantThunk(Fn, 
MD, Extern, + CovariantThunkAdjustment(ThisAdjustment, + ThunkAdjustment())); } -llvm::Value *CodeGenFunction::DynamicTypeAdjust(llvm::Value *V, int64_t nv, - int64_t v) { - llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), - 0); +llvm::Value * +CodeGenFunction::DynamicTypeAdjust(llvm::Value *V, + const ThunkAdjustment &Adjustment) { + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext); + const llvm::Type *OrigTy = V->getType(); - if (nv) { + if (Adjustment.NonVirtual) { // Do the non-virtual adjustment - V = Builder.CreateBitCast(V, Ptr8Ty); - V = Builder.CreateConstInBoundsGEP1_64(V, nv); - V = Builder.CreateBitCast(V, OrigTy); - } - if (v) { - // Do the virtual this adjustment - const llvm::Type *PtrDiffTy = - ConvertType(getContext().getPointerDiffType()); - llvm::Type *PtrPtr8Ty, *PtrPtrDiffTy; - PtrPtr8Ty = llvm::PointerType::get(Ptr8Ty, 0); - PtrPtrDiffTy = llvm::PointerType::get(PtrDiffTy, 0); - llvm::Value *ThisVal = Builder.CreateBitCast(V, Ptr8Ty); - V = Builder.CreateBitCast(V, PtrPtrDiffTy->getPointerTo()); - V = Builder.CreateLoad(V, "vtable"); - llvm::Value *VTablePtr = V; - assert(v % (LLVMPointerWidth/8) == 0 && "vtable entry unaligned"); - v /= LLVMPointerWidth/8; - V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, v); - V = Builder.CreateLoad(V); - V = Builder.CreateGEP(ThisVal, V); + V = Builder.CreateBitCast(V, Int8PtrTy); + V = Builder.CreateConstInBoundsGEP1_64(V, Adjustment.NonVirtual); V = Builder.CreateBitCast(V, OrigTy); } - return V; + + if (!Adjustment.Virtual) + return V; + + assert(Adjustment.Virtual % (LLVMPointerWidth / 8) == 0 && + "vtable entry unaligned"); + + // Do the virtual this adjustment + const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType()); + const llvm::Type *PtrDiffPtrTy = PtrDiffTy->getPointerTo(); + + llvm::Value *ThisVal = Builder.CreateBitCast(V, Int8PtrTy); + V = Builder.CreateBitCast(V, PtrDiffPtrTy->getPointerTo()); + V = Builder.CreateLoad(V, "vtable"); + + llvm::Value *VTablePtr = V; + uint64_t VirtualAdjustment = Adjustment.Virtual / (LLVMPointerWidth / 8); + V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment); + V = Builder.CreateLoad(V); + V = Builder.CreateGEP(ThisVal, V); + + return Builder.CreateBitCast(V, OrigTy); } -llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, - const CXXMethodDecl *MD, - bool Extern, - int64_t nv_t, - int64_t v_t, - int64_t nv_r, - int64_t v_r) { +llvm::Constant * +CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, + const CXXMethodDecl *MD, bool Extern, + const CovariantThunkAdjustment &Adjustment) { QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType(); FunctionArgList Args; @@ -878,16 +909,23 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, llvm::Value *Callee = CGM.GetAddrOfFunction(MD, Ty); CallArgList CallArgs; + bool ShouldAdjustReturnPointer = true; QualType ArgType = MD->getThisType(getContext()); llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this"); - if (nv_t || v_t) { + if (!Adjustment.ThisAdjustment.isEmpty()) { // Do the this adjustment. 
const llvm::Type *OrigTy = Callee->getType(); - Arg = DynamicTypeAdjust(Arg, nv_t, v_t); - if (nv_r || v_r) { - Callee = CGM.BuildCovariantThunk(MD, Extern, 0, 0, nv_r, v_r); + Arg = DynamicTypeAdjust(Arg, Adjustment.ThisAdjustment); + + if (!Adjustment.ReturnAdjustment.isEmpty()) { + const CovariantThunkAdjustment &ReturnAdjustment = + CovariantThunkAdjustment(ThunkAdjustment(), + Adjustment.ReturnAdjustment); + + Callee = CGM.BuildCovariantThunk(MD, Extern, ReturnAdjustment); + Callee = Builder.CreateBitCast(Callee, OrigTy); - nv_r = v_r = 0; + ShouldAdjustReturnPointer = false; } } @@ -906,7 +944,7 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs), Callee, CallArgs, MD); - if (nv_r || v_r) { + if (ShouldAdjustReturnPointer && !Adjustment.ReturnAdjustment.isEmpty()) { bool CanBeZero = !(ResultType->isReferenceType() // FIXME: attr nonnull can't be zero either /* || ResultType->hasAttr<NonNullAttr>() */ ); @@ -921,7 +959,8 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, Builder.CreateCondBr(Builder.CreateICmpNE(RV.getScalarVal(), Zero), NonZeroBlock, ZeroBlock); EmitBlock(NonZeroBlock); - llvm::Value *NZ = DynamicTypeAdjust(RV.getScalarVal(), nv_r, v_r); + llvm::Value *NZ = + DynamicTypeAdjust(RV.getScalarVal(), Adjustment.ReturnAdjustment); EmitBranch(ContBlock); EmitBlock(ZeroBlock); llvm::Value *Z = RV.getScalarVal(); @@ -932,7 +971,8 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, RVOrZero->addIncoming(Z, ZeroBlock); RV = RValue::get(RVOrZero); } else - RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), nv_r, v_r)); + RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), + Adjustment.ReturnAdjustment)); } if (!ResultType->isVoidType()) @@ -942,11 +982,13 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, return Fn; } -llvm::Constant *CodeGenModule::BuildThunk(const CXXMethodDecl *MD, bool Extern, - int64_t nv, int64_t v) { +llvm::Constant * +CodeGenModule::BuildThunk(const CXXMethodDecl *MD, bool Extern, + const ThunkAdjustment &ThisAdjustment) { + llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleThunk(getMangleContext(), MD, nv, v, Out); + getMangleContext().mangleThunk(MD, ThisAdjustment, OutName); + llvm::GlobalVariable::LinkageTypes linktype; linktype = llvm::GlobalValue::WeakAnyLinkage; if (!Extern) @@ -957,20 +999,18 @@ llvm::Constant *CodeGenModule::BuildThunk(const CXXMethodDecl *MD, bool Extern, getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), FPT->isVariadic()); - llvm::Function *Fn = llvm::Function::Create(FTy, linktype, Out.str(), + llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(), &getModule()); - CodeGenFunction(*this).GenerateThunk(Fn, MD, Extern, nv, v); + CodeGenFunction(*this).GenerateThunk(Fn, MD, Extern, ThisAdjustment); llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty); return m; } -llvm::Constant *CodeGenModule::BuildCovariantThunk(const CXXMethodDecl *MD, - bool Extern, int64_t nv_t, - int64_t v_t, int64_t nv_r, - int64_t v_r) { +llvm::Constant * +CodeGenModule::BuildCovariantThunk(const CXXMethodDecl *MD, bool Extern, + const CovariantThunkAdjustment &Adjustment) { llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCovariantThunk(getMangleContext(), MD, nv_t, v_t, nv_r, v_r, Out); + getMangleContext().mangleCovariantThunk(MD, Adjustment, OutName); 
llvm::GlobalVariable::LinkageTypes linktype; linktype = llvm::GlobalValue::WeakAnyLinkage; if (!Extern) @@ -981,10 +1021,9 @@ llvm::Constant *CodeGenModule::BuildCovariantThunk(const CXXMethodDecl *MD, getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), FPT->isVariadic()); - llvm::Function *Fn = llvm::Function::Create(FTy, linktype, Out.str(), + llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(), &getModule()); - CodeGenFunction(*this).GenerateCovariantThunk(Fn, MD, Extern, nv_t, v_t, nv_r, - v_r); + CodeGenFunction(*this).GenerateCovariantThunk(Fn, MD, Extern, Adjustment); llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty); return m; } @@ -1016,7 +1055,7 @@ CodeGenFunction::GetVirtualCXXBaseClassOffset(llvm::Value *This, return VBaseOffset; } -static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, int64_t VtableIndex, +static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VtableIndex, llvm::Value *This, const llvm::Type *Ty) { Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo(); @@ -1032,7 +1071,7 @@ llvm::Value * CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This, const llvm::Type *Ty) { MD = MD->getCanonicalDecl(); - int64_t VtableIndex = CGM.getVtableInfo().getMethodVtableIndex(MD); + uint64_t VtableIndex = CGM.getVtableInfo().getMethodVtableIndex(MD); return ::BuildVirtualCall(*this, VtableIndex, This, Ty); } @@ -1041,7 +1080,7 @@ llvm::Value * CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type, llvm::Value *&This, const llvm::Type *Ty) { DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl()); - int64_t VtableIndex = + uint64_t VtableIndex = CGM.getVtableInfo().getMethodVtableIndex(GlobalDecl(DD, Type)); return ::BuildVirtualCall(*this, VtableIndex, This, Ty); @@ -1065,7 +1104,7 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest, "loop.index"); llvm::Value* zeroConstant = llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)); - Builder.CreateStore(zeroConstant, IndexPtr, false); + Builder.CreateStore(zeroConstant, IndexPtr); // Start the loop with a block that tests the condition. llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); llvm::BasicBlock *AfterFor = createBasicBlock("for.end"); @@ -1115,7 +1154,7 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest, llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1); Counter = Builder.CreateLoad(IndexPtr); NextVal = Builder.CreateAdd(Counter, NextVal, "inc"); - Builder.CreateStore(NextVal, IndexPtr, false); + Builder.CreateStore(NextVal, IndexPtr); // Finally, branch back up to the condition for the next iteration. EmitBranch(CondBlock); @@ -1142,7 +1181,7 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest, "loop.index"); llvm::Value* zeroConstant = llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)); - Builder.CreateStore(zeroConstant, IndexPtr, false); + Builder.CreateStore(zeroConstant, IndexPtr); // Start the loop with a block that tests the condition. 
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); llvm::BasicBlock *AfterFor = createBasicBlock("for.end"); @@ -1199,7 +1238,7 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest, llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1); Counter = Builder.CreateLoad(IndexPtr); NextVal = Builder.CreateAdd(Counter, NextVal, "inc"); - Builder.CreateStore(NextVal, IndexPtr, false); + Builder.CreateStore(NextVal, IndexPtr); // Finally, branch back up to the condition for the next iteration. EmitBranch(CondBlock); @@ -1216,10 +1255,10 @@ void CodeGenFunction::EmitClassMemberwiseCopy( const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl, QualType Ty) { if (ClassDecl) { - Dest = GetAddressCXXOfBaseClass(Dest, ClassDecl, BaseClassDecl, - /*NullCheckValue=*/false); - Src = GetAddressCXXOfBaseClass(Src, ClassDecl, BaseClassDecl, - /*NullCheckValue=*/false); + Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); + Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); } if (BaseClassDecl->hasTrivialCopyConstructor()) { EmitAggregateCopy(Dest, Src, Ty); @@ -1255,10 +1294,10 @@ void CodeGenFunction::EmitClassCopyAssignment( const CXXRecordDecl *BaseClassDecl, QualType Ty) { if (ClassDecl) { - Dest = GetAddressCXXOfBaseClass(Dest, ClassDecl, BaseClassDecl, - /*NullCheckValue=*/false); - Src = GetAddressCXXOfBaseClass(Src, ClassDecl, BaseClassDecl, - /*NullCheckValue=*/false); + Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); + Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); } if (BaseClassDecl->hasTrivialCopyAssignment()) { EmitAggregateCopy(Dest, Src, Ty); @@ -1297,6 +1336,7 @@ CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor, CXXCtorType Type, llvm::Function *Fn, const FunctionArgList &Args) { + assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor"); StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args, SourceLocation()); EmitCtorPrologue(Ctor, Type); @@ -1326,6 +1366,7 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor, const CXXRecordDecl *ClassDecl = Ctor->getParent(); assert(!ClassDecl->hasUserDeclaredCopyConstructor() && "SynthesizeCXXCopyConstructor - copy constructor has definition already"); + assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor"); StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args, SourceLocation()); @@ -1349,10 +1390,11 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor, Base->getType()); } - for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), - FieldEnd = ClassDecl->field_end(); - Field != FieldEnd; ++Field) { - QualType FieldType = getContext().getCanonicalType((*Field)->getType()); + for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(), + E = ClassDecl->field_end(); I != E; ++I) { + const FieldDecl *Field = *I; + + QualType FieldType = getContext().getCanonicalType(Field->getType()); const ConstantArrayType *Array = getContext().getAsConstantArrayType(FieldType); if (Array) @@ -1361,8 +1403,8 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor, if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(FieldClassType->getDecl()); - LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0); - 
LValue RHS = EmitLValueForField(LoadOfSrc, *Field, false, 0); + LValue LHS = EmitLValueForField(LoadOfThis, Field, false, 0); + LValue RHS = EmitLValueForField(LoadOfSrc, Field, false, 0); if (Array) { const llvm::Type *BasePtr = ConvertType(FieldType); BasePtr = llvm::PointerType::getUnqual(BasePtr); @@ -1378,9 +1420,28 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor, 0 /*ClassDecl*/, FieldClassDecl, FieldType); continue; } + + if (Field->getType()->isReferenceType()) { + unsigned FieldIndex = CGM.getTypes().getLLVMFieldNo(Field); + + llvm::Value *LHS = Builder.CreateStructGEP(LoadOfThis, FieldIndex, + "lhs.ref"); + + llvm::Value *RHS = Builder.CreateStructGEP(LoadOfThis, FieldIndex, + "rhs.ref"); + + // Load the value in RHS. + RHS = Builder.CreateLoad(RHS); + + // And store it in the LHS + Builder.CreateStore(RHS, LHS); + + continue; + } // Do a built-in assignment of scalar data members. - LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0); - LValue RHS = EmitLValueForField(LoadOfSrc, *Field, false, 0); + LValue LHS = EmitLValueForField(LoadOfThis, Field, false, 0); + LValue RHS = EmitLValueForField(LoadOfSrc, Field, false, 0); + if (!hasAggregateLLVMType(Field->getType())) { RValue RVRHS = EmitLoadOfLValue(RHS, Field->getType()); EmitStoreThroughLValue(RVRHS, LHS, Field->getType()); @@ -1498,9 +1559,9 @@ static void EmitBaseInitializer(CodeGenFunction &CGF, const Type *BaseType = BaseInit->getBaseClass(); CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl()); - llvm::Value *V = CGF.GetAddressCXXOfBaseClass(ThisPtr, ClassDecl, - BaseClassDecl, - /*NullCheckValue=*/false); + llvm::Value *V = CGF.GetAddressOfBaseClass(ThisPtr, ClassDecl, + BaseClassDecl, + /*NullCheckValue=*/false); CGF.EmitCXXConstructorCall(BaseInit->getConstructor(), CtorType, V, BaseInit->const_arg_begin(), @@ -1564,7 +1625,9 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr); CGF.EmitCXXAggrConstructorCall(MemberInit->getConstructor(), - Array, BaseAddrPtr); + Array, BaseAddrPtr, + MemberInit->const_arg_begin(), + MemberInit->const_arg_end()); } else CGF.EmitCXXConstructorCall(MemberInit->getConstructor(), @@ -1714,12 +1777,12 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD, // Ignore trivial destructors. if (BaseClassDecl->hasTrivialDestructor()) continue; - - llvm::Value *V = GetAddressCXXOfBaseClass(LoadCXXThis(), - ClassDecl, BaseClassDecl, - /*NullCheckValue=*/false); - EmitCXXDestructorCall(BaseClassDecl->getDestructor(getContext()), - Dtor_Base, V); + const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext()); + + llvm::Value *V = GetAddressOfBaseClass(LoadCXXThis(), + ClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); + EmitCXXDestructorCall(D, Dtor_Base, V); } // If we're emitting a base destructor, we don't want to emit calls to the @@ -1727,10 +1790,21 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD, if (DtorType == Dtor_Base) return; - // FIXME: Handle virtual bases. + // Handle virtual bases. for (CXXRecordDecl::reverse_base_class_const_iterator I = ClassDecl->vbases_rbegin(), E = ClassDecl->vbases_rend(); I != E; ++I) { - assert(false && "FIXME: Handle virtual bases."); + const CXXBaseSpecifier &Base = *I; + CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); + + // Ignore trivial destructors. 
+ if (BaseClassDecl->hasTrivialDestructor()) + continue; + const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext()); + llvm::Value *V = GetAddressOfBaseClass(LoadCXXThis(), + ClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); + EmitCXXDestructorCall(D, Dtor_Base, V); } // If we have a deleting destructor, emit a call to the delete operator. @@ -1752,9 +1826,3 @@ void CodeGenFunction::SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor, EmitDtorEpilogue(Dtor, DtorType); FinishFunction(); } - -// FIXME: Move this to CGStmtCXX.cpp -void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) { - // FIXME: We need to do more here. - EmitStmt(S.getTryBlock()); -} diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp index d0c7d03..decc73c 100644 --- a/lib/CodeGen/CGCall.cpp +++ b/lib/CodeGen/CGCall.cpp @@ -91,6 +91,42 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) { getCallingConventionForDecl(MD)); } +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D, + CXXCtorType Type) { + llvm::SmallVector<QualType, 16> ArgTys; + + // Add the 'this' pointer. + ArgTys.push_back(D->getThisType(Context)); + + // Check if we need to add a VTT parameter (which has type void **). + if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0) + ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy)); + + const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>(); + for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) + ArgTys.push_back(FTP->getArgType(i)); + return getFunctionInfo(FTP->getResultType(), ArgTys, + getCallingConventionForDecl(D)); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D, + CXXDtorType Type) { + llvm::SmallVector<QualType, 16> ArgTys; + + // Add the 'this' pointer. + ArgTys.push_back(D->getThisType(Context)); + + // Check if we need to add a VTT parameter (which has type void **). 
+ if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0) + ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy)); + + const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>(); + for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) + ArgTys.push_back(FTP->getArgType(i)); + return getFunctionInfo(FTP->getResultType(), ArgTys, + getCallingConventionForDecl(D)); +} + const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) { if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) if (MD->isInstance()) @@ -418,6 +454,32 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic); } +static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) { + if (const TagType *TT = T->getResultType()->getAs<TagType>()) { + if (!TT->getDecl()->isDefinition()) + return true; + } + + for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) { + if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) { + if (!TT->getDecl()->isDefinition()) + return true; + } + } + + return false; +} + +const llvm::Type * +CodeGenTypes::GetFunctionTypeForVtable(const CXXMethodDecl *MD) { + const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); + + if (!HasIncompleteReturnTypeOrArgumentTypes(FPT)) + return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic()); + + return llvm::OpaqueType::get(getLLVMContext()); +} + void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, const Decl *TargetDecl, AttributeListType &PAL, diff --git a/lib/CodeGen/CGCXXClass.cpp b/lib/CodeGen/CGClass.cpp index 533aabc..b3c2b98 100644 --- a/lib/CodeGen/CGCXXClass.cpp +++ b/lib/CodeGen/CGClass.cpp @@ -1,4 +1,4 @@ -//===--- CGCXXClass.cpp - Emit LLVM Code for C++ classes ------------------===// +//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===// // // The LLVM Compiler Infrastructure // @@ -31,9 +31,6 @@ ComputeNonVirtualBaseClassOffset(ASTContext &Context, CXXBasePaths &Paths, const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class); const CXXBaseSpecifier *BS = Element.Base; - // FIXME: enable test3 from virt.cc to not abort. 
- if (BS->isVirtual()) - return 0; assert(!BS->isVirtual() && "Should not see virtual bases here!"); const CXXRecordDecl *Base = @@ -75,7 +72,7 @@ static llvm::Value *GetCXXBaseClassOffset(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) { CXXBasePaths Paths(/*FindAmbiguities=*/false, - /*RecordPaths=*/true, /*DetectVirtual=*/true); + /*RecordPaths=*/true, /*DetectVirtual=*/false); if (!const_cast<CXXRecordDecl *>(ClassDecl)-> isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClassDecl), Paths)) { assert(false && "Class must be derived from the passed in base class!"); @@ -84,21 +81,20 @@ static llvm::Value *GetCXXBaseClassOffset(CodeGenFunction &CGF, unsigned Start = 0; llvm::Value *VirtualOffset = 0; - if (const RecordType *RT = Paths.getDetectedVirtual()) { - const CXXRecordDecl *VBase = cast<CXXRecordDecl>(RT->getDecl()); - - VirtualOffset = - CGF.GetVirtualCXXBaseClassOffset(BaseValue, ClassDecl, VBase); - - const CXXBasePath &Path = Paths.front(); - unsigned e = Path.size(); - for (Start = 0; Start != e; ++Start) { - const CXXBasePathElement& Element = Path[Start]; - - if (Element.Class == VBase) - break; + + const CXXBasePath &Path = Paths.front(); + const CXXRecordDecl *VBase = 0; + for (unsigned i = 0, e = Path.size(); i != e; ++i) { + const CXXBasePathElement& Element = Path[i]; + if (Element.Base->isVirtual()) { + Start = i+1; + QualType VBaseType = Element.Base->getType(); + VBase = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl()); } } + if (VBase) + VirtualOffset = + CGF.GetVirtualCXXBaseClassOffset(BaseValue, ClassDecl, VBase); uint64_t Offset = ComputeNonVirtualBaseClassOffset(CGF.getContext(), Paths, Start); @@ -117,10 +113,10 @@ static llvm::Value *GetCXXBaseClassOffset(CodeGenFunction &CGF, } llvm::Value * -CodeGenFunction::GetAddressCXXOfBaseClass(llvm::Value *BaseValue, - const CXXRecordDecl *ClassDecl, - const CXXRecordDecl *BaseClassDecl, - bool NullCheckValue) { +CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value, + const CXXRecordDecl *ClassDecl, + const CXXRecordDecl *BaseClassDecl, + bool NullCheckValue) { QualType BTy = getContext().getCanonicalType( getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(BaseClassDecl))); @@ -128,7 +124,7 @@ CodeGenFunction::GetAddressCXXOfBaseClass(llvm::Value *BaseValue, if (ClassDecl == BaseClassDecl) { // Just cast back. - return Builder.CreateBitCast(BaseValue, BasePtrTy); + return Builder.CreateBitCast(Value, BasePtrTy); } llvm::BasicBlock *CastNull = 0; @@ -141,8 +137,8 @@ CodeGenFunction::GetAddressCXXOfBaseClass(llvm::Value *BaseValue, CastEnd = createBasicBlock("cast.end"); llvm::Value *IsNull = - Builder.CreateICmpEQ(BaseValue, - llvm::Constant::getNullValue(BaseValue->getType())); + Builder.CreateICmpEQ(Value, + llvm::Constant::getNullValue(Value->getType())); Builder.CreateCondBr(IsNull, CastNull, CastNotNull); EmitBlock(CastNotNull); } @@ -150,16 +146,16 @@ CodeGenFunction::GetAddressCXXOfBaseClass(llvm::Value *BaseValue, const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext); llvm::Value *Offset = - GetCXXBaseClassOffset(*this, BaseValue, ClassDecl, BaseClassDecl); + GetCXXBaseClassOffset(*this, Value, ClassDecl, BaseClassDecl); if (Offset) { // Apply the offset. - BaseValue = Builder.CreateBitCast(BaseValue, Int8PtrTy); - BaseValue = Builder.CreateGEP(BaseValue, Offset, "add.ptr"); + Value = Builder.CreateBitCast(Value, Int8PtrTy); + Value = Builder.CreateGEP(Value, Offset, "add.ptr"); } // Cast back. 
- BaseValue = Builder.CreateBitCast(BaseValue, BasePtrTy); + Value = Builder.CreateBitCast(Value, BasePtrTy); if (NullCheckValue) { Builder.CreateBr(CastEnd); @@ -167,13 +163,73 @@ CodeGenFunction::GetAddressCXXOfBaseClass(llvm::Value *BaseValue, Builder.CreateBr(CastEnd); EmitBlock(CastEnd); - llvm::PHINode *PHI = Builder.CreatePHI(BaseValue->getType()); + llvm::PHINode *PHI = Builder.CreatePHI(Value->getType()); + PHI->reserveOperandSpace(2); + PHI->addIncoming(Value, CastNotNull); + PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), + CastNull); + Value = PHI; + } + + return Value; +} + +llvm::Value * +CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, + const CXXRecordDecl *ClassDecl, + const CXXRecordDecl *DerivedClassDecl, + bool NullCheckValue) { + QualType DerivedTy = + getContext().getCanonicalType( + getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(DerivedClassDecl))); + const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(); + + if (ClassDecl == DerivedClassDecl) { + // Just cast back. + return Builder.CreateBitCast(Value, DerivedPtrTy); + } + + llvm::BasicBlock *CastNull = 0; + llvm::BasicBlock *CastNotNull = 0; + llvm::BasicBlock *CastEnd = 0; + + if (NullCheckValue) { + CastNull = createBasicBlock("cast.null"); + CastNotNull = createBasicBlock("cast.notnull"); + CastEnd = createBasicBlock("cast.end"); + + llvm::Value *IsNull = + Builder.CreateICmpEQ(Value, + llvm::Constant::getNullValue(Value->getType())); + Builder.CreateCondBr(IsNull, CastNull, CastNotNull); + EmitBlock(CastNotNull); + } + + llvm::Value *Offset = GetCXXBaseClassOffset(*this, Value, DerivedClassDecl, + ClassDecl); + if (Offset) { + // Apply the offset. + Value = Builder.CreatePtrToInt(Value, Offset->getType()); + Value = Builder.CreateSub(Value, Offset); + Value = Builder.CreateIntToPtr(Value, DerivedPtrTy); + } else { + // Just cast. + Value = Builder.CreateBitCast(Value, DerivedPtrTy); + } + + if (NullCheckValue) { + Builder.CreateBr(CastEnd); + EmitBlock(CastNull); + Builder.CreateBr(CastEnd); + EmitBlock(CastEnd); + + llvm::PHINode *PHI = Builder.CreatePHI(Value->getType()); PHI->reserveOperandSpace(2); - PHI->addIncoming(BaseValue, CastNotNull); - PHI->addIncoming(llvm::Constant::getNullValue(BaseValue->getType()), + PHI->addIncoming(Value, CastNotNull); + PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull); - BaseValue = PHI; + Value = PHI; } - return BaseValue; + return Value; } diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp index 0551667..317da7e 100644 --- a/lib/CodeGen/CGDebugInfo.cpp +++ b/lib/CodeGen/CGDebugInfo.cpp @@ -95,10 +95,10 @@ llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) { // file at a time. 
bool isMain = false; const LangOptions &LO = M->getLangOptions(); - const char *MainFileName = LO.getMainFileName(); + const CodeGenOptions &CGO = M->getCodeGenOpts(); if (isMainCompileUnitCreated == false) { - if (MainFileName) { - if (!strcmp(AbsFileName.getLast().c_str(), MainFileName)) + if (!CGO.MainFileName.empty()) { + if (AbsFileName.getLast() == CGO.MainFileName) isMain = true; } else { if (Loc.isValid() && SM.isFromMainFile(Loc)) @@ -417,7 +417,7 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIType DbgTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef, Unit, - Ty->getDecl()->getNameAsCString(), + Ty->getDecl()->getName(), DefUnit, Line, 0, 0, 0, 0, Src); return DbgTy; } @@ -482,7 +482,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty, // may refer to the forward decl if the struct is recursive) and replace all // uses of the forward declaration with the final definition. llvm::DICompositeType FwdDecl = - DebugFactory.CreateCompositeType(Tag, Unit, Decl->getNameAsString().data(), + DebugFactory.CreateCompositeType(Tag, Unit, Decl->getName(), DefUnit, Line, 0, 0, 0, 0, llvm::DIType(), llvm::DIArray()); @@ -507,10 +507,10 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty, FieldDecl *Field = *I; llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit); - const char *FieldName = Field->getNameAsCString(); + llvm::StringRef FieldName = Field->getName(); // Ignore unnamed fields. - if (!FieldName) + if (FieldName.empty()) continue; // Get the location for the field. @@ -558,7 +558,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty, uint64_t Align = M->getContext().getTypeAlign(Ty); llvm::DICompositeType RealDecl = - DebugFactory.CreateCompositeType(Tag, Unit, Decl->getNameAsString().data(), + DebugFactory.CreateCompositeType(Tag, Unit, Decl->getName(), DefUnit, Line, Size, Align, 0, 0, llvm::DIType(), Elements); @@ -592,7 +592,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, // may refer to the forward decl if the struct is recursive) and replace all // uses of the forward declaration with the final definition. llvm::DICompositeType FwdDecl = - DebugFactory.CreateCompositeType(Tag, Unit, Decl->getNameAsCString(), + DebugFactory.CreateCompositeType(Tag, Unit, Decl->getName(), DefUnit, Line, 0, 0, 0, 0, llvm::DIType(), llvm::DIArray(), RuntimeLang); @@ -628,10 +628,10 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, ObjCIvarDecl *Field = *I; llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit); - const char *FieldName = Field->getNameAsCString(); + llvm::StringRef FieldName = Field->getName(); // Ignore unnamed fields. - if (!FieldName) + if (FieldName.empty()) continue; // Get the location for the field. 
@@ -682,7 +682,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, uint64_t Align = M->getContext().getTypeAlign(Ty); llvm::DICompositeType RealDecl = - DebugFactory.CreateCompositeType(Tag, Unit, Decl->getNameAsCString(), DefUnit, + DebugFactory.CreateCompositeType(Tag, Unit, Decl->getName(), DefUnit, Line, Size, Align, 0, 0, llvm::DIType(), Elements, RuntimeLang); @@ -703,7 +703,7 @@ llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty, for (EnumDecl::enumerator_iterator Enum = Decl->enumerator_begin(), EnumEnd = Decl->enumerator_end(); Enum != EnumEnd; ++Enum) { - Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getNameAsCString(), + Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(), Enum->getInitVal().getZExtValue())); } @@ -728,7 +728,7 @@ llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty, llvm::DIType DbgTy = DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type, - Unit, Decl->getNameAsCString(), DefUnit, Line, + Unit, Decl->getName(), DefUnit, Line, Size, Align, 0, 0, llvm::DIType(), EltArray); return DbgTy; @@ -1104,7 +1104,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag, FieldAlign = Align*8; FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit, - Decl->getNameAsCString(), DefUnit, + Decl->getName(), DefUnit, 0, FieldSize, FieldAlign, FieldOffset, 0, FieldTy); EltTys.push_back(FieldTy); @@ -1135,7 +1135,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag, // Create the descriptor for the variable. llvm::DIVariable D = DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()), - Decl->getNameAsCString(), + Decl->getName(), Unit, Line, Ty); // Insert an llvm.dbg.declare into the current block. llvm::Instruction *Call = @@ -1282,7 +1282,7 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, XOffset = FieldOffset; FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit, - Decl->getNameAsCString(), DefUnit, + Decl->getName(), DefUnit, 0, FieldSize, FieldAlign, FieldOffset, 0, FieldTy); EltTys.push_back(FieldTy); @@ -1336,7 +1336,7 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, // Create the descriptor for the variable. llvm::DIVariable D = DebugFactory.CreateComplexVariable(Tag, llvm::DIDescriptor(RegionStack.back()), - Decl->getNameAsCString(), Unit, Line, Ty, + Decl->getName(), Unit, Line, Ty, addr); // Insert an llvm.dbg.declare into the current block. llvm::Instruction *Call = @@ -1392,9 +1392,9 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, T = M->getContext().getConstantArrayType(ET, ConstVal, ArrayType::Normal, 0); } - const char *DeclName = Decl->getNameAsCString(); + llvm::StringRef DeclName = Decl->getName(); DebugFactory.CreateGlobalVariable(getContext(Decl, Unit), DeclName, DeclName, - NULL, Unit, LineNo, + llvm::StringRef(), Unit, LineNo, getOrCreateType(T, Unit), Var->hasInternalLinkage(), true/*definition*/, Var); @@ -1409,7 +1409,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation()); unsigned LineNo = PLoc.isInvalid() ? 
0 : PLoc.getLine(); - const char *Name = Decl->getNameAsCString(); + llvm::StringRef Name = Decl->getName(); QualType T = M->getContext().getObjCInterfaceType(Decl); if (T->isIncompleteArrayType()) { diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp index 349ede5..c047283 100644 --- a/lib/CodeGen/CGDecl.cpp +++ b/lib/CodeGen/CGDecl.cpp @@ -30,7 +30,9 @@ using namespace CodeGen; void CodeGenFunction::EmitDecl(const Decl &D) { switch (D.getKind()) { - default: assert(0 && "Unknown decl kind!"); + default: + CGM.ErrorUnsupported(&D, "decl"); + return; case Decl::ParmVar: assert(0 && "Parmdecls should not be in declstmts!"); case Decl::Function: // void X(); @@ -38,7 +40,9 @@ void CodeGenFunction::EmitDecl(const Decl &D) { case Decl::Enum: // enum X; case Decl::EnumConstant: // enum ? { X = ? } case Decl::CXXRecord: // struct/union/class X; [C++] - case Decl::UsingDirective: // using X; [C++] + case Decl::Using: // using X; [C++] + case Decl::UsingShadow: + case Decl::UsingDirective: // using namespace X; [C++] // None of these decls require codegen support. return; @@ -372,7 +376,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { { // Push a cleanup block and restore the stack there. - CleanupScope scope(*this); + DelayedCleanupBlock scope(*this); V = Builder.CreateLoad(Stack, "tmp"); llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore); @@ -517,7 +521,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { if (const ConstantArrayType *Array = getContext().getAsConstantArrayType(Ty)) { - CleanupScope Scope(*this); + DelayedCleanupBlock Scope(*this); QualType BaseElementTy = getContext().getBaseElementType(Array); const llvm::Type *BasePtr = ConvertType(BaseElementTy); BasePtr = llvm::PointerType::getUnqual(BasePtr); @@ -528,7 +532,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { // Make sure to jump to the exit block. 
EmitBranch(Scope.getCleanupExitBlock()); } else { - CleanupScope Scope(*this); + DelayedCleanupBlock Scope(*this); EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr); } } @@ -541,7 +545,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { llvm::Constant* F = CGM.GetAddrOfFunction(FD); assert(F && "Could not find function!"); - CleanupScope scope(*this); + DelayedCleanupBlock scope(*this); const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD); @@ -562,9 +566,9 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { } if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) { - CleanupScope scope(*this); + DelayedCleanupBlock scope(*this); llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding"); - V = Builder.CreateLoad(V, false); + V = Builder.CreateLoad(V); BuildBlockRelease(V); } } diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp index adfd005..420e275 100644 --- a/lib/CodeGen/CGException.cpp +++ b/lib/CodeGen/CGException.cpp @@ -11,6 +11,10 @@ // //===----------------------------------------------------------------------===// +#include "clang/AST/StmtCXX.h" + +#include "llvm/Intrinsics.h" + #include "CodeGenFunction.h" using namespace clang; using namespace CodeGen; @@ -35,29 +39,158 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) { std::vector<const llvm::Type*> Args(3, Int8PtrTy); const llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), - Args, false); + llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), + Args, false); return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw"); } +static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) { + // void __cxa_rethrow (); + + const llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false); + + return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow"); +} + +static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) { + // void* __cxa_begin_catch (); + + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + std::vector<const llvm::Type*> Args(1, Int8PtrTy); + + const llvm::FunctionType *FTy = + llvm::FunctionType::get(Int8PtrTy, Args, false); + + return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); +} + +static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) { + // void __cxa_end_catch (); + + const llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false); + + return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); +} + +// FIXME: Eventually this will all go into the backend. Set from the target for +// now. +static int using_sjlj_exceptions = 0; + +static llvm::Constant *getUnwindResumeOrRethrowFn(CodeGenFunction &CGF) { + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + std::vector<const llvm::Type*> Args(1, Int8PtrTy); + + const llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), Args, + false); + + if (using_sjlj_exceptions) + return CGF.CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume"); + return CGF.CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow"); +} + +// CopyObject - Utility to copy an object. Calls copy constructor as necessary. +// N is casted to the right type. +static void CopyObject(CodeGenFunction &CGF, const Expr *E, llvm::Value *N) { + QualType ObjectType = E->getType(); + + // Store the throw exception in the exception object. 
+ if (!CGF.hasAggregateLLVMType(ObjectType)) { + llvm::Value *Value = CGF.EmitScalarExpr(E); + const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0); + + CGF.Builder.CreateStore(Value, CGF.Builder.CreateBitCast(N, ValuePtrTy)); + } else { + const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0); + const CXXRecordDecl *RD; + RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl()); + llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty); + if (RD->hasTrivialCopyConstructor()) { + CGF.EmitAggExpr(E, This, false); + } else if (CXXConstructorDecl *CopyCtor + = RD->getCopyConstructor(CGF.getContext(), 0)) { + // FIXME: region management + llvm::Value *Src = CGF.EmitLValue(E).getAddress(); + + // Stolen from EmitClassAggrMemberwiseCopy + llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, + Ctor_Complete); + CallArgList CallArgs; + CallArgs.push_back(std::make_pair(RValue::get(This), + CopyCtor->getThisType(CGF.getContext()))); + + // Push the Src ptr. + CallArgs.push_back(std::make_pair(RValue::get(Src), + CopyCtor->getParamDecl(0)->getType())); + QualType ResultType = + CopyCtor->getType()->getAs<FunctionType>()->getResultType(); + CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(ResultType, CallArgs), + Callee, CallArgs, CopyCtor); + // FIXME: region management + } else + CGF.ErrorUnsupported(E, "uncopyable object"); + } +} + +// CopyObject - Utility to copy an object. Calls copy constructor as necessary. +// N is casted to the right type. +static void CopyObject(CodeGenFunction &CGF, QualType ObjectType, + llvm::Value *E, llvm::Value *N) { + // Store the throw exception in the exception object. + if (!CGF.hasAggregateLLVMType(ObjectType)) { + llvm::Value *Value = E; + const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0); + + CGF.Builder.CreateStore(Value, CGF.Builder.CreateBitCast(N, ValuePtrTy)); + } else { + const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0); + const CXXRecordDecl *RD; + RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl()); + llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty); + if (RD->hasTrivialCopyConstructor()) { + CGF.EmitAggregateCopy(This, E, ObjectType); + } else if (CXXConstructorDecl *CopyCtor + = RD->getCopyConstructor(CGF.getContext(), 0)) { + // FIXME: region management + llvm::Value *Src = E; + + // Stolen from EmitClassAggrMemberwiseCopy + llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, + Ctor_Complete); + CallArgList CallArgs; + CallArgs.push_back(std::make_pair(RValue::get(This), + CopyCtor->getThisType(CGF.getContext()))); + + // Push the Src ptr. + CallArgs.push_back(std::make_pair(RValue::get(Src), + CopyCtor->getParamDecl(0)->getType())); + QualType ResultType = + CopyCtor->getType()->getAs<FunctionType>()->getResultType(); + CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(ResultType, CallArgs), + Callee, CallArgs, CopyCtor); + // FIXME: region management + } else + llvm::llvm_unreachable("uncopyable object"); + } +} + void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { - // FIXME: Handle rethrows. if (!E->getSubExpr()) { - ErrorUnsupported(E, "rethrow expression"); + Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn(); + Builder.CreateUnreachable(); + + // Clear the insertion point to indicate we are in unreachable code. + Builder.ClearInsertionPoint(); return; } QualType ThrowType = E->getSubExpr()->getType(); - // FIXME: We only handle non-class types for now. 
- if (ThrowType->isRecordType()) { - ErrorUnsupported(E, "throw expression"); - return; - } - // FIXME: Handle cleanup. if (!CleanupEntries.empty()){ - ErrorUnsupported(E, "throw expression"); + ErrorUnsupported(E, "throw expression with cleanup entries"); return; } @@ -71,28 +204,11 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { llvm::ConstantInt::get(SizeTy, TypeSize), "exception"); - // Store the throw exception in the exception object. - if (!hasAggregateLLVMType(ThrowType)) { - llvm::Value *Value = EmitScalarExpr(E->getSubExpr()); - const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0); - - Builder.CreateStore(Value, Builder.CreateBitCast(ExceptionPtr, ValuePtrTy)); - } else { - // FIXME: Handle complex and aggregate expressions. - ErrorUnsupported(E, "throw expression"); - } + CopyObject(*this, E->getSubExpr(), ExceptionPtr); // Now throw the exception. const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext()); - - llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXRtti(CGM.getMangleContext(), ThrowType, Out); - - // FIXME: Is it OK to use CreateRuntimeVariable for this? - llvm::Constant *TypeInfo = - CGM.CreateRuntimeVariable(llvm::Type::getInt8Ty(getLLVMContext()), - OutName.c_str()); + llvm::Constant *TypeInfo = CGM.GenerateRtti(ThrowType); llvm::Constant *Dtor = llvm::Constant::getNullValue(Int8PtrTy); llvm::CallInst *ThrowCall = @@ -103,3 +219,217 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { // Clear the insertion point to indicate we are in unreachable code. Builder.ClearInsertionPoint(); } + +void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) { +#if 1 + EmitStmt(S.getTryBlock()); + if (0) { + getBeginCatchFn(*this); + getEndCatchFn(*this); + getUnwindResumeOrRethrowFn(*this); + CopyObject(*this, QualType(), 0, 0); + } +#else + // FIXME: The below is still just a sketch of the code we need. + // Pointer to the personality function + llvm::Constant *Personality = + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty + (VMContext), + true), + "__gxx_personality_v0"); + Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty); + + llvm::BasicBlock *PrevLandingPad = getInvokeDest(); + llvm::BasicBlock *TryHandler = createBasicBlock("try.handler"); +#if 0 + llvm::BasicBlock *FinallyBlock = createBasicBlock("finally"); +#endif + llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw"); + llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end"); + +#if 0 + // Push an EH context entry, used for handling rethrows. + PushCleanupBlock(FinallyBlock); +#endif + + // Emit the statements in the try {} block + setInvokeDest(TryHandler); + + EmitStmt(S.getTryBlock()); + + // Jump to end if there is no exception + EmitBranchThroughCleanup(FinallyEnd); + + // Emit the handlers + EmitBlock(TryHandler); + + const llvm::IntegerType *Int8Ty; + const llvm::PointerType *PtrToInt8Ty; + Int8Ty = llvm::Type::getInt8Ty(VMContext); + // C string type. Used in lots of places. 
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty); + llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty); + llvm::SmallVector<llvm::Value*, 8> SelectorArgs; + llvm::Value *llvm_eh_exception = + CGM.getIntrinsic(llvm::Intrinsic::eh_exception); + llvm::Value *llvm_eh_selector = + CGM.getIntrinsic(llvm::Intrinsic::eh_selector); + llvm::Value *llvm_eh_typeid_for = + CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for); + // Exception object + llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); + llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow"); + + SelectorArgs.push_back(Exc); + SelectorArgs.push_back(Personality); + + bool HasCatchAll = false; + for (unsigned i = 0; i<S.getNumHandlers(); ++i) { + const CXXCatchStmt *C = S.getHandler(i); + VarDecl *CatchParam = C->getExceptionDecl(); + if (CatchParam) { + llvm::Value *EHType = CGM.GenerateRtti(C->getCaughtType().getNonReferenceType()); + SelectorArgs.push_back(EHType); + } else { + // null indicates catch all + SelectorArgs.push_back(Null); + HasCatchAll = true; + } + } + + // We use a cleanup unless there was already a catch all. + if (!HasCatchAll) { + SelectorArgs.push_back(Null); + } + + // Find which handler was matched. + llvm::Value *Selector + = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(), + SelectorArgs.end(), "selector"); + for (unsigned i = 0; i<S.getNumHandlers(); ++i) { + const CXXCatchStmt *C = S.getHandler(i); + VarDecl *CatchParam = C->getExceptionDecl(); + Stmt *CatchBody = C->getHandlerBlock(); + + llvm::BasicBlock *Next = 0; + + if (SelectorArgs[i+2] != Null) { + llvm::BasicBlock *Match = createBasicBlock("match"); + Next = createBasicBlock("catch.next"); + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext()); + llvm::Value *Id + = Builder.CreateCall(llvm_eh_typeid_for, + Builder.CreateBitCast(SelectorArgs[i+2], + Int8PtrTy)); + Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id), + Match, Next); + EmitBlock(Match); + } + + llvm::BasicBlock *MatchEnd = createBasicBlock("match.end"); + llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler"); + + PushCleanupBlock(MatchEnd); + setInvokeDest(MatchHandler); + + llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc); + + // Bind the catch parameter if it exists. + if (CatchParam) { + QualType CatchType = CatchParam->getType().getNonReferenceType(); + if (!CatchType.getTypePtr()->isPointerType()) + CatchType = getContext().getPointerType(CatchType); + ExcObject = + Builder.CreateBitCast(ExcObject, ConvertType(CatchType)); + // CatchParam is a ParmVarDecl because of the grammar + // construction used to handle this, but for codegen purposes + // we treat this as a local decl. + EmitLocalBlockVarDecl(*CatchParam); +#if 0 + // FIXME: objects with ctors, references + Builder.CreateStore(ExcObject, GetAddrOfLocalVar(CatchParam)); +#else + CopyObject(*this, CatchParam->getType().getNonReferenceType(), + ExcObject, GetAddrOfLocalVar(CatchParam)); +#endif + } + + EmitStmt(CatchBody); + EmitBranchThroughCleanup(FinallyEnd); + + EmitBlock(MatchHandler); + + llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); + // We are required to emit this call to satisfy LLVM, even + // though we don't use the result. 
+ llvm::SmallVector<llvm::Value*, 8> Args; + Args.push_back(Exc); + Args.push_back(Personality); + Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), + 0)); + Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end()); + Builder.CreateStore(Exc, RethrowPtr); + EmitBranchThroughCleanup(FinallyRethrow); + + CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock(); + + EmitBlock(MatchEnd); + + // Unfortunately, we also have to generate another EH frame here + // in case this throws. + llvm::BasicBlock *MatchEndHandler = + createBasicBlock("match.end.handler"); + llvm::BasicBlock *Cont = createBasicBlock("myinvoke.cont"); + Builder.CreateInvoke(getEndCatchFn(*this), + Cont, MatchEndHandler, + Args.begin(), Args.begin()); + + EmitBlock(Cont); + if (Info.SwitchBlock) + EmitBlock(Info.SwitchBlock); + if (Info.EndBlock) + EmitBlock(Info.EndBlock); + + EmitBlock(MatchEndHandler); + Exc = Builder.CreateCall(llvm_eh_exception, "exc"); + // We are required to emit this call to satisfy LLVM, even + // though we don't use the result. + Args.clear(); + Args.push_back(Exc); + Args.push_back(Personality); + Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), + 0)); + Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end()); + Builder.CreateStore(Exc, RethrowPtr); + EmitBranchThroughCleanup(FinallyRethrow); + + if (Next) + EmitBlock(Next); + } + if (!HasCatchAll) + EmitBranchThroughCleanup(FinallyRethrow); + + CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock(); + + setInvokeDest(PrevLandingPad); + +#if 0 + EmitBlock(FinallyBlock); + + if (Info.SwitchBlock) + EmitBlock(Info.SwitchBlock); + if (Info.EndBlock) + EmitBlock(Info.EndBlock); + + // Branch around the rethrow code. + EmitBranch(FinallyEnd); +#endif + + EmitBlock(FinallyRethrow); + Builder.CreateCall(getUnwindResumeOrRethrowFn(*this), + Builder.CreateLoad(RethrowPtr)); + Builder.CreateUnreachable(); + + EmitBlock(FinallyEnd); +#endif +} diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index 2a544c5..63fca2d 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -137,7 +137,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, const CXXDestructorDecl *Dtor = ClassDecl->getDestructor(getContext()); - CleanupScope scope(*this); + DelayedCleanupBlock scope(*this); EmitCXXDestructorCall(Dtor, Dtor_Complete, Val.getAggregateAddr()); } } @@ -148,8 +148,8 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, if (BaseClassDecl) { llvm::Value *Derived = Val.getAggregateAddr(); llvm::Value *Base = - GetAddressCXXOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl, - /*NullCheckValue=*/false); + GetAddressOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl, + /*NullCheckValue=*/false); return RValue::get(Base); } } @@ -258,8 +258,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { case Expr::BlockDeclRefExprClass: return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); - case Expr::CXXConditionDeclExprClass: - return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E)); case Expr::CXXTemporaryObjectExprClass: case Expr::CXXConstructExprClass: return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); @@ -314,9 +312,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, QualType Ty) { - llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp"); + llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp"); + if (Volatile) + Load->setVolatile(true); // 
Bool can have different representation in memory than in registers. + llvm::Value *V = Load; if (Ty->isBooleanType()) if (V->getType() != llvm::Type::getInt1Ty(VMContext)) V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool"); @@ -830,6 +831,24 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, return LV; } +static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, + const Expr *E, const FunctionDecl *FD) { + llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD); + if (!FD->hasPrototype()) { + if (const FunctionProtoType *Proto = + FD->getType()->getAs<FunctionProtoType>()) { + // Ugly case: for a K&R-style definition, the type of the definition + // isn't the same as the type of a use. Correct for this with a + // bitcast. + QualType NoProtoType = + CGF.getContext().getFunctionNoProtoType(Proto->getResultType()); + NoProtoType = CGF.getContext().getPointerType(NoProtoType); + V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp"); + } + } + return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType())); +} + LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); @@ -851,7 +870,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { if (VD->hasAttr<BlocksAttr>()) { V = Builder.CreateStructGEP(V, 1, "forwarding"); - V = Builder.CreateLoad(V, false); + V = Builder.CreateLoad(V); V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD), VD->getNameAsString()); } @@ -863,22 +882,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { return LV; } - if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { - llvm::Value* V = CGM.GetAddrOfFunction(FD); - if (!FD->hasPrototype()) { - if (const FunctionProtoType *Proto = - FD->getType()->getAs<FunctionProtoType>()) { - // Ugly case: for a K&R-style definition, the type of the definition - // isn't the same as the type of a use. Correct for this with a - // bitcast. 
- QualType NoProtoType = - getContext().getFunctionNoProtoType(Proto->getResultType()); - NoProtoType = getContext().getPointerType(NoProtoType); - V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp"); - } - } - return LValue::MakeAddr(V, MakeQualifiers(E->getType())); - } + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) + return EmitFunctionDeclLValue(*this, E, FD); if (E->getQualifier()) { // FIXME: the qualifier check does not seem sufficient here @@ -1165,7 +1170,10 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { if (VarDecl *VD = dyn_cast<VarDecl>(ND)) return EmitGlobalVarDeclLValue(*this, E, VD); - + + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) + return EmitFunctionDeclLValue(*this, E, FD); + assert(false && "Unhandled member declaration!"); return LValue(); } @@ -1328,8 +1336,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { // Perform the derived-to-base conversion llvm::Value *Base = - GetAddressCXXOfBaseClass(LV.getAddress(), DerivedClassDecl, - BaseClassDecl, /*NullCheckValue=*/false); + GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl, + BaseClassDecl, /*NullCheckValue=*/false); return LValue::MakeAddr(Base, MakeQualifiers(E->getType())); } @@ -1340,7 +1348,23 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); } case CastExpr::CK_BaseToDerived: { - return EmitUnsupportedLValue(E, "base-to-derived cast lvalue"); + const RecordType *BaseClassTy = + E->getSubExpr()->getType()->getAs<RecordType>(); + CXXRecordDecl *BaseClassDecl = + cast<CXXRecordDecl>(BaseClassTy->getDecl()); + + const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); + CXXRecordDecl *DerivedClassDecl = + cast<CXXRecordDecl>(DerivedClassTy->getDecl()); + + LValue LV = EmitLValue(E->getSubExpr()); + + // Perform the base-to-derived conversion + llvm::Value *Derived = + GetAddressOfDerivedClass(LV.getAddress(), BaseClassDecl, + DerivedClassDecl, /*NullCheckValue=*/false); + + return LValue::MakeAddr(Derived, MakeQualifiers(E->getType())); } case CastExpr::CK_BitCast: { // This must be a reinterpret_cast (or c-style equivalent). 
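// --- Illustrative sketch (editorial note, not part of the patch) -----------
// The CK_DerivedToBase and CK_BaseToDerived lvalue paths in the hunk above
// both reduce to shifting an address by the base subobject's offset inside
// the derived object: GetAddressOfBaseClass adds that offset and
// GetAddressOfDerivedClass subtracts it (plus optional null checks the sketch
// omits). A minimal standalone model, where OffsetOfBaseInDerived stands in
// for the value the record layout would supply:

static char *AdjustToBase(char *Derived, long OffsetOfBaseInDerived) {
  // Derived-to-base: step forward to the embedded base subobject.
  return Derived + OffsetOfBaseInDerived;
}

static char *AdjustToDerived(char *Base, long OffsetOfBaseInDerived) {
  // Base-to-derived: step back to the start of the enclosing derived object.
  return Base - OffsetOfBaseInDerived;
}
// ----------------------------------------------------------------------------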
@@ -1460,12 +1484,6 @@ LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); } -LValue -CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) { - EmitLocalBlockVarDecl(*E->getVarDecl()); - return EmitDeclRefLValue(E); -} - LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp"); EmitCXXConstructExpr(Temp, E); diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp index 0e10368..d225d90 100644 --- a/lib/CodeGen/CGExprAgg.cpp +++ b/lib/CodeGen/CGExprAgg.cpp @@ -20,7 +20,6 @@ #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/GlobalVariable.h" -#include "llvm/Support/Compiler.h" #include "llvm/Intrinsics.h" using namespace clang; using namespace CodeGen; @@ -30,7 +29,7 @@ using namespace CodeGen; //===----------------------------------------------------------------------===// namespace { -class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> { +class AggExprEmitter : public StmtVisitor<AggExprEmitter> { CodeGenFunction &CGF; CGBuilderTy &Builder; llvm::Value *DestPtr; @@ -223,6 +222,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { break; } + case CastExpr::CK_DerivedToBaseMemberPointer: case CastExpr::CK_BaseToDerivedMemberPointer: { QualType SrcType = E->getSubExpr()->getType(); @@ -242,16 +242,22 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { llvm::Value *DstAdj = Builder.CreateStructGEP(DestPtr, 1, "dst.adj"); // Now See if we need to update the adjustment. - const CXXRecordDecl *SrcDecl = + const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(SrcType->getAs<MemberPointerType>()-> getClass()->getAs<RecordType>()->getDecl()); - const CXXRecordDecl *DstDecl = + const CXXRecordDecl *DerivedDecl = cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()-> getClass()->getAs<RecordType>()->getDecl()); - - llvm::Constant *Adj = CGF.CGM.GetCXXBaseClassOffset(DstDecl, SrcDecl); - if (Adj) - SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj"); + if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) + std::swap(DerivedDecl, BaseDecl); + + llvm::Constant *Adj = CGF.CGM.GetCXXBaseClassOffset(DerivedDecl, BaseDecl); + if (Adj) { + if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) + SrcAdj = Builder.CreateSub(SrcAdj, Adj, "adj"); + else + SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj"); + } Builder.CreateStore(SrcAdj, DstAdj, VolatileDest); break; @@ -389,21 +395,21 @@ void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) { llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond()); Builder.CreateCondBr(Cond, LHSBlock, RHSBlock); - CGF.PushConditionalTempDestruction(); + CGF.StartConditionalBranch(); CGF.EmitBlock(LHSBlock); // Handle the GNU extension for missing LHS. 
assert(E->getLHS() && "Must have LHS for aggregate value"); Visit(E->getLHS()); - CGF.PopConditionalTempDestruction(); + CGF.FinishConditionalBranch(); CGF.EmitBranch(ContBlock); - CGF.PushConditionalTempDestruction(); + CGF.StartConditionalBranch(); CGF.EmitBlock(RHSBlock); Visit(E->getRHS()); - CGF.PopConditionalTempDestruction(); + CGF.FinishConditionalBranch(); CGF.EmitBranch(ContBlock); CGF.EmitBlock(ContBlock); diff --git a/lib/CodeGen/CGCXXExpr.cpp b/lib/CodeGen/CGExprCXX.cpp index cd7d21b..b982c15 100644 --- a/lib/CodeGen/CGCXXExpr.cpp +++ b/lib/CodeGen/CGExprCXX.cpp @@ -1,4 +1,4 @@ -//===--- CGCXXExpr.cpp - Emit LLVM Code for C++ expressions ---------------===// +//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===// // // The LLVM Compiler Infrastructure // @@ -83,38 +83,41 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, llvm::Value *NewPtr, llvm::Value *NumElements) { + if (E->isArray()) { + if (CXXConstructorDecl *Ctor = E->getConstructor()) + CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr, + E->constructor_arg_begin(), + E->constructor_arg_end()); + return; + } + QualType AllocType = E->getAllocatedType(); - if (!E->isArray()) { - if (CXXConstructorDecl *Ctor = E->getConstructor()) { - CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr, - E->constructor_arg_begin(), - E->constructor_arg_end()); + if (CXXConstructorDecl *Ctor = E->getConstructor()) { + CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr, + E->constructor_arg_begin(), + E->constructor_arg_end()); - return; - } + return; + } - // We have a POD type. - if (E->getNumConstructorArgs() == 0) - return; + // We have a POD type. + if (E->getNumConstructorArgs() == 0) + return; - assert(E->getNumConstructorArgs() == 1 && - "Can only have one argument to initializer of POD type."); + assert(E->getNumConstructorArgs() == 1 && + "Can only have one argument to initializer of POD type."); - const Expr *Init = E->getConstructorArg(0); + const Expr *Init = E->getConstructorArg(0); - if (!CGF.hasAggregateLLVMType(AllocType)) - CGF.Builder.CreateStore(CGF.EmitScalarExpr(Init), NewPtr); - else if (AllocType->isAnyComplexType()) - CGF.EmitComplexExprIntoAddr(Init, NewPtr, - AllocType.isVolatileQualified()); - else - CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified()); - return; - } - - if (CXXConstructorDecl *Ctor = E->getConstructor()) - CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr); + if (!CGF.hasAggregateLLVMType(AllocType)) + CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr, + AllocType.isVolatileQualified(), AllocType); + else if (AllocType->isAnyComplexType()) + CGF.EmitComplexExprIntoAddr(Init, NewPtr, + AllocType.isVolatileQualified()); + else + CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified()); } llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { @@ -359,7 +362,7 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) { return Builder.CreateBitCast(CGM.GenerateRttiRef(RD), LTy); return Builder.CreateBitCast(CGM.GenerateRtti(RD), LTy); } - return Builder.CreateBitCast(CGM.GenerateRttiNonClass(Ty), LTy); + return Builder.CreateBitCast(CGM.GenerateRtti(Ty), LTy); } Expr *subE = E->getExprOperand(); Ty = subE->getType(); @@ -403,7 +406,7 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) { } return Builder.CreateBitCast(CGM.GenerateRtti(RD), LTy); } - return 
Builder.CreateBitCast(CGM.GenerateRttiNonClass(Ty), LTy); + return Builder.CreateBitCast(CGM.GenerateRtti(Ty), LTy); } llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V, diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp index 9e81e4f..7fa8ffb 100644 --- a/lib/CodeGen/CGExprComplex.cpp +++ b/lib/CodeGen/CGExprComplex.cpp @@ -18,7 +18,6 @@ #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/ADT/SmallString.h" -#include "llvm/Support/Compiler.h" using namespace clang; using namespace CodeGen; @@ -29,7 +28,7 @@ using namespace CodeGen; typedef CodeGenFunction::ComplexPairTy ComplexPairTy; namespace { -class VISIBILITY_HIDDEN ComplexExprEmitter +class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> { CodeGenFunction &CGF; CGBuilderTy &Builder; @@ -261,34 +260,18 @@ public: /// load the real and imaginary pieces, returning them as Real/Imag. ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile) { - llvm::SmallString<64> Name(SrcPtr->getName().begin(), - SrcPtr->getName().end()); - llvm::Value *Real=0, *Imag=0; if (!IgnoreReal) { - // FIXME: Clean this up once builder takes Twine/StringRef. - Name += ".realp"; - llvm::Value *RealPtr = Builder.CreateStructGEP(SrcPtr, 0, - Name.str().str().c_str()); - - Name.pop_back(); // .realp -> .real - // FIXME: Clean this up once builder takes Twine/StringRef. - Real = Builder.CreateLoad(RealPtr, isVolatile, - Name.str().str().c_str()); - Name.resize(Name.size()-4); // .real -> .imagp + llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0, + SrcPtr->getName() + ".realp"); + Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real"); } if (!IgnoreImag) { - Name += "imagp"; - - // FIXME: Clean this up once builder takes Twine/StringRef. - llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1, - Name.str().str().c_str()); - - Name.pop_back(); // .imagp -> .imag - // FIXME: Clean this up once builder takes Twine/StringRef. - Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.str().str().c_str()); + llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1, + SrcPtr->getName() + ".imagp"); + Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag"); } return ComplexPairTy(Real, Imag); } diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp index 40b845d..9289f78 100644 --- a/lib/CodeGen/CGExprConstant.cpp +++ b/lib/CodeGen/CGExprConstant.cpp @@ -22,14 +22,12 @@ #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/GlobalVariable.h" -#include "llvm/Support/Compiler.h" #include "llvm/Target/TargetData.h" using namespace clang; using namespace CodeGen; namespace { - -class VISIBILITY_HIDDEN ConstStructBuilder { +class ConstStructBuilder { CodeGenModule &CGM; CodeGenFunction *CGF; @@ -377,7 +375,7 @@ public: } }; -class VISIBILITY_HIDDEN ConstExprEmitter : +class ConstExprEmitter : public StmtVisitor<ConstExprEmitter, llvm::Constant*> { CodeGenModule &CGM; CodeGenFunction *CGF; @@ -413,9 +411,10 @@ public: // Get the function pointer (or index if this is a virtual function). if (MD->isVirtual()) { - int64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD); + uint64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD); - Values[0] = llvm::ConstantInt::get(PtrDiffTy, Index + 1); + // The pointer is 1 + the virtual table offset in bytes. 
+ Values[0] = llvm::ConstantInt::get(PtrDiffTy, (Index * 8) + 1); } else { llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD); @@ -673,7 +672,7 @@ public: if (ILE->getType()->isArrayType()) return EmitArrayInitialization(ILE); - if (ILE->getType()->isStructureType()) + if (ILE->getType()->isRecordType()) return EmitStructInitialization(ILE); if (ILE->getType()->isUnionType()) diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp index e9bbf35..c1cbecc 100644 --- a/lib/CodeGen/CGExprScalar.cpp +++ b/lib/CodeGen/CGExprScalar.cpp @@ -24,7 +24,6 @@ #include "llvm/GlobalVariable.h" #include "llvm/Intrinsics.h" #include "llvm/Module.h" -#include "llvm/Support/Compiler.h" #include "llvm/Support/CFG.h" #include "llvm/Target/TargetData.h" #include <cstdarg> @@ -45,7 +44,7 @@ struct BinOpInfo { }; namespace { -class VISIBILITY_HIDDEN ScalarExprEmitter +class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, Value*> { CodeGenFunction &CGF; CGBuilderTy &Builder; @@ -141,8 +140,11 @@ public: // l-values. Value *VisitDeclRefExpr(DeclRefExpr *E) { - if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl())) - return llvm::ConstantInt::get(VMContext, EC->getInitVal()); + Expr::EvalResult Result; + if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) { + assert(!Result.HasSideEffects && "Constant declref with side-effect?!"); + return llvm::ConstantInt::get(VMContext, Result.Val.getInt()); + } return EmitLoadOfLValue(E); } Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) { @@ -167,7 +169,7 @@ public: Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E); Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E); - Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitMemberExpr(MemberExpr *E); Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { return EmitLoadOfLValue(E); @@ -184,14 +186,14 @@ public: Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { return llvm::Constant::getNullValue(ConvertType(E->getType())); } - Value *VisitCastExpr(const CastExpr *E) { + Value *VisitCastExpr(CastExpr *E) { // Make sure to evaluate VLA bounds now so that we have them for later. if (E->getType()->isVariablyModifiedType()) CGF.EmitVLASize(E->getType()); return EmitCastExpr(E); } - Value *EmitCastExpr(const CastExpr *E); + Value *EmitCastExpr(CastExpr *E); Value *VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType()->isReferenceType()) @@ -558,6 +560,17 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size()); return Builder.CreateShuffleVector(V1, V2, SV, "shuffle"); } +Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { + Expr::EvalResult Result; + if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) { + if (E->isArrow()) + CGF.EmitScalarExpr(E->getBase()); + else + EmitLValue(E->getBase()); + return llvm::ConstantInt::get(VMContext, Result.Val.getInt()); + } + return EmitLoadOfLValue(E); +} Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { TestAndClearIgnoreResultAssign(); @@ -748,23 +761,40 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { return V; } +static bool ShouldNullCheckClassCastValue(const CastExpr *CE) { + const Expr *E = CE->getSubExpr(); + + if (isa<CXXThisExpr>(E)) { + // We always assume that 'this' is never null. 
+ return false; + } + + if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) { + // And that lvalue casts are never null. + if (ICE->isLvalueCast()) + return false; + } + + return true; +} + // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts // have to handle a more broad range of conversions than explicit casts, as they // handle things like function to ptr-to-function decay etc. -Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) { - const Expr *E = CE->getSubExpr(); +Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { + Expr *E = CE->getSubExpr(); QualType DestTy = CE->getType(); CastExpr::CastKind Kind = CE->getCastKind(); if (!DestTy->isVoidType()) TestAndClearIgnoreResultAssign(); + // Since almost all cast kinds apply to scalars, this switch doesn't have + // a default case, so the compiler will warn on a missing case. The cases + // are in the same order as in the CastKind enum. switch (Kind) { - default: - //return CGF.ErrorUnsupported(E, "type of cast"); - break; - case CastExpr::CK_Unknown: + // FIXME: All casts should have a known kind! //assert(0 && "Unknown cast kind!"); break; @@ -775,6 +805,18 @@ Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) { case CastExpr::CK_NoOp: return Visit(const_cast<Expr*>(E)); + case CastExpr::CK_BaseToDerived: { + const CXXRecordDecl *BaseClassDecl = + E->getType()->getCXXRecordDeclForPointerType(); + const CXXRecordDecl *DerivedClassDecl = + DestTy->getCXXRecordDeclForPointerType(); + + Value *Src = Visit(const_cast<Expr*>(E)); + + bool NullCheckValue = ShouldNullCheckClassCastValue(CE); + return CGF.GetAddressOfDerivedClass(Src, BaseClassDecl, DerivedClassDecl, + NullCheckValue); + } case CastExpr::CK_DerivedToBase: { const RecordType *DerivedClassTy = E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>(); @@ -787,23 +829,19 @@ Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) { Value *Src = Visit(const_cast<Expr*>(E)); - bool NullCheckValue = true; - - if (isa<CXXThisExpr>(E)) { - // We always assume that 'this' is never null. - NullCheckValue = false; - } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) { - // And that lvalue casts are never null. - if (ICE->isLvalueCast()) - NullCheckValue = false; - } - return CGF.GetAddressCXXOfBaseClass(Src, DerivedClassDecl, BaseClassDecl, - NullCheckValue); + bool NullCheckValue = ShouldNullCheckClassCastValue(CE); + return CGF.GetAddressOfBaseClass(Src, DerivedClassDecl, BaseClassDecl, + NullCheckValue); + } + case CastExpr::CK_Dynamic: { + Value *V = Visit(const_cast<Expr*>(E)); + const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); + return CGF.EmitDynamicCast(V, DCE); } - case CastExpr::CK_ToUnion: { + case CastExpr::CK_ToUnion: assert(0 && "Should be unreachable!"); break; - } + case CastExpr::CK_ArrayToPointerDecay: { assert(E->getType()->isArrayType() && "Array to pointer decay must have array source type!"); @@ -828,6 +866,35 @@ Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) { case CastExpr::CK_NullToMemberPointer: return CGF.CGM.EmitNullConstant(DestTy); + case CastExpr::CK_BaseToDerivedMemberPointer: + case CastExpr::CK_DerivedToBaseMemberPointer: { + Value *Src = Visit(E); + + // See if we need to adjust the pointer. 
+ const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()-> + getClass()->getAs<RecordType>()->getDecl()); + const CXXRecordDecl *DerivedDecl = + cast<CXXRecordDecl>(CE->getType()->getAs<MemberPointerType>()-> + getClass()->getAs<RecordType>()->getDecl()); + if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) + std::swap(DerivedDecl, BaseDecl); + + llvm::Constant *Adj = CGF.CGM.GetCXXBaseClassOffset(DerivedDecl, BaseDecl); + if (Adj) { + if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) + Src = Builder.CreateSub(Src, Adj, "adj"); + else + Src = Builder.CreateAdd(Src, Adj, "adj"); + } + return Src; + } + + case CastExpr::CK_UserDefinedConversion: + case CastExpr::CK_ConstructorConversion: + assert(0 && "Should be unreachable!"); + break; + case CastExpr::CK_IntegralToPointer: { Value *Src = Visit(const_cast<Expr*>(E)); @@ -841,23 +908,14 @@ Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) { return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy)); } - case CastExpr::CK_PointerToIntegral: { Value *Src = Visit(const_cast<Expr*>(E)); return Builder.CreatePtrToInt(Src, ConvertType(DestTy)); } - case CastExpr::CK_ToVoid: { CGF.EmitAnyExpr(E, 0, false, true); return 0; } - - case CastExpr::CK_Dynamic: { - Value *V = Visit(const_cast<Expr*>(E)); - const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); - return CGF.EmitDynamicCast(V, DCE); - } - case CastExpr::CK_VectorSplat: { const llvm::Type *DstTy = ConvertType(DestTy); Value *Elt = Visit(const_cast<Expr*>(E)); @@ -879,7 +937,40 @@ Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) { llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); return Yay; } + case CastExpr::CK_IntegralCast: + case CastExpr::CK_IntegralToFloating: + case CastExpr::CK_FloatingToIntegral: + case CastExpr::CK_FloatingCast: + return EmitScalarConversion(Visit(E), E->getType(), DestTy); + case CastExpr::CK_MemberPointerToBoolean: { + const MemberPointerType* T = E->getType()->getAs<MemberPointerType>(); + + if (T->getPointeeType()->isFunctionType()) { + // We have a member function pointer. + llvm::Value *Ptr = CGF.CreateTempAlloca(ConvertType(E->getType())); + + CGF.EmitAggExpr(E, Ptr, /*VolatileDest=*/false); + + // Get the pointer. + llvm::Value *FuncPtr = Builder.CreateStructGEP(Ptr, 0, "src.ptr"); + FuncPtr = Builder.CreateLoad(FuncPtr); + + llvm::Value *IsNotNull = + Builder.CreateICmpNE(FuncPtr, + llvm::Constant::getNullValue(FuncPtr->getType()), + "tobool"); + + return IsNotNull; + } + + // We have a regular member pointer. + Value *Ptr = Visit(const_cast<Expr*>(E)); + llvm::Value *IsNotNull = + Builder.CreateICmpNE(Ptr, CGF.CGM.EmitNullConstant(E->getType()), + "tobool"); + return IsNotNull; + } } // Handle cases where the source is an non-complex type. 
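// --- Illustrative sketch (editorial note, not part of the patch) -----------
// In the Itanium ABI a pointer to data member is a byte offset into the
// object (ptrdiff_t). The CK_BaseToDerivedMemberPointer and
// CK_DerivedToBaseMemberPointer cases in the hunk above therefore adjust that
// offset by the base subobject's position: base-to-derived adds it,
// derived-to-base subtracts it. A simplified standalone model that ignores
// the null (-1) representation, with BaseOffsetInDerived standing in for
// GetCXXBaseClassOffset:

#include <cstddef>

static std::ptrdiff_t ToDerivedMemberPtr(std::ptrdiff_t BaseMember,
                                         std::ptrdiff_t BaseOffsetInDerived) {
  // Converting B::* -> D::*: the member sits later inside the derived layout.
  return BaseMember + BaseOffsetInDerived;
}

static std::ptrdiff_t ToBaseMemberPtr(std::ptrdiff_t DerivedMember,
                                      std::ptrdiff_t BaseOffsetInDerived) {
  // Converting D::* -> B::*: undo the base subobject displacement.
  return DerivedMember - BaseOffsetInDerived;
}
// ----------------------------------------------------------------------------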
@@ -924,7 +1015,7 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) { llvm::Value *V = CGF.GetAddrOfBlockDecl(E); if (E->getType().isObjCGCWeak()) return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V); - return Builder.CreateLoad(V, false, "tmp"); + return Builder.CreateLoad(V, "tmp"); } //===----------------------------------------------------------------------===// @@ -1583,10 +1674,10 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { PI != PE; ++PI) PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI); - CGF.PushConditionalTempDestruction(); + CGF.StartConditionalBranch(); CGF.EmitBlock(RHSBlock); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); - CGF.PopConditionalTempDestruction(); + CGF.FinishConditionalBranch(); // Reaquire the RHS block, as there may be subblocks inserted. RHSBlock = Builder.GetInsertBlock(); @@ -1633,13 +1724,13 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { PI != PE; ++PI) PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI); - CGF.PushConditionalTempDestruction(); + CGF.StartConditionalBranch(); // Emit the RHS condition as a bool value. CGF.EmitBlock(RHSBlock); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); - CGF.PopConditionalTempDestruction(); + CGF.FinishConditionalBranch(); // Reaquire the RHS block, as there may be subblocks inserted. RHSBlock = Builder.GetInsertBlock(); @@ -1753,7 +1844,7 @@ VisitConditionalOperator(const ConditionalOperator *E) { Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock); } - CGF.PushConditionalTempDestruction(); + CGF.StartConditionalBranch(); CGF.EmitBlock(LHSBlock); // Handle the GNU extension for missing LHS. @@ -1763,15 +1854,15 @@ VisitConditionalOperator(const ConditionalOperator *E) { else // Perform promotions, to handle cases like "short ?: int" LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType()); - CGF.PopConditionalTempDestruction(); + CGF.FinishConditionalBranch(); LHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); - CGF.PushConditionalTempDestruction(); + CGF.StartConditionalBranch(); CGF.EmitBlock(RHSBlock); Value *RHS = Visit(E->getRHS()); - CGF.PopConditionalTempDestruction(); + CGF.FinishConditionalBranch(); RHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp index b431daa..be772c7 100644 --- a/lib/CodeGen/CGObjCGNU.cpp +++ b/lib/CodeGen/CGObjCGNU.cpp @@ -747,9 +747,14 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList( std::vector<llvm::Constant*> Elements; for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end(); iter != endIter ; iter++) { - llvm::Constant *protocol = ExistingProtocols[*iter]; - if (!protocol) + llvm::Constant *protocol = 0; + llvm::StringMap<llvm::Constant*>::iterator value = + ExistingProtocols.find(*iter); + if (value == ExistingProtocols.end()) { protocol = GenerateEmptyProtocol(*iter); + } else { + protocol = value->getValue(); + } llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol, PtrToInt8Ty); Elements.push_back(Ptr); @@ -1366,8 +1371,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { ConstantStrings.size() + 1); ConstantStrings.push_back(NULLPtr); - const char *StringClass = CGM.getLangOptions().ObjCConstantStringClass; - if (!StringClass) StringClass = "NXConstantString"; + llvm::StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass; + if (StringClass.empty()) StringClass = "NXConstantString"; 
Elements.push_back(MakeConstantString(StringClass, ".objc_static_class_name")); Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy, diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp index 4355e66..2e8ab29 100644 --- a/lib/CodeGen/CGObjCMac.cpp +++ b/lib/CodeGen/CGObjCMac.cpp @@ -2993,7 +2993,7 @@ llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder, 4, true); } - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); } llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) { @@ -3009,7 +3009,7 @@ llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) { 4, true); } - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); } llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) { @@ -4516,7 +4516,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder, llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName); if (PTGV) - return Builder.CreateLoad(PTGV, false, "tmp"); + return Builder.CreateLoad(PTGV, "tmp"); PTGV = new llvm::GlobalVariable( CGM.getModule(), Init->getType(), false, @@ -4526,7 +4526,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder, PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip"); PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility); CGM.AddUsedGlobal(PTGV); - return Builder.CreateLoad(PTGV, false, "tmp"); + return Builder.CreateLoad(PTGV, "tmp"); } /// GenerateCategory - Build metadata for a category implementation. @@ -5031,8 +5031,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset( CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) { - return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar), - false, "ivar"); + return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar"); } CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend( @@ -5187,7 +5186,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder, CGM.AddUsedGlobal(Entry); } - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); } llvm::Value * @@ -5210,7 +5209,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder, CGM.AddUsedGlobal(Entry); } - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); } /// EmitMetaClassRef - Return a Value * of the address of _class_t @@ -5220,7 +5219,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder, const ObjCInterfaceDecl *ID) { llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()]; if (Entry) - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString()); llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName); @@ -5236,7 +5235,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder, Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip"); CGM.AddUsedGlobal(Entry); - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); } /// GetClass - Return a reference to the class for the given interface @@ -5323,7 +5322,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder, CGM.AddUsedGlobal(Entry); } - return Builder.CreateLoad(Entry, false, "tmp"); + return Builder.CreateLoad(Entry, "tmp"); } /// 
EmitObjCIvarAssign - Code gen for assigning to a __strong object. /// objc_assign_ivar (id src, id *dst, ptrdiff_t) diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp index a63c832..1a9bc39 100644 --- a/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -330,36 +330,6 @@ void CGRecordLayoutBuilder::CheckForMemberPointer(const FieldDecl *FD) { } -static const CXXMethodDecl *GetKeyFunction(const RecordDecl *D) { - const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D); - if (!RD || !RD->isDynamicClass()) - return 0; - - for (CXXRecordDecl::method_iterator I = RD->method_begin(), - E = RD->method_end(); I != E; ++I) { - const CXXMethodDecl *MD = *I; - - if (!MD->isVirtual()) - continue; - - if (MD->isPure()) - continue; - - // FIXME: This doesn't work. If we have an out of line body, that body will - // set the MD to have a body, what we want to know is, was the body present - // inside the declaration of the class. For now, we just avoid the problem - // by pretending there is no key function. - return 0; - if (MD->getBody()) - continue; - - // We found it. - return MD; - } - - return 0; -} - CGRecordLayout * CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types, const RecordDecl *D) { @@ -389,7 +359,5 @@ CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types, Types.addBitFieldInfo(Info.FD, Info.FieldNo, Info.Start, Info.Size); } - const CXXMethodDecl *KeyFunction = GetKeyFunction(D); - - return new CGRecordLayout(Ty, Builder.ContainsMemberPointer, KeyFunction); + return new CGRecordLayout(Ty, Builder.ContainsMemberPointer); } diff --git a/lib/CodeGen/CGRtti.cpp b/lib/CodeGen/CGRtti.cpp index 79d8664..43fcb31 100644 --- a/lib/CodeGen/CGRtti.cpp +++ b/lib/CodeGen/CGRtti.cpp @@ -49,9 +49,8 @@ public: llvm::Constant *BuildName(QualType Ty, bool Hidden, bool Extern) { llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXRttiName(CGM.getMangleContext(), Ty, Out); - llvm::StringRef Name = Out.str(); + CGM.getMangleContext().mangleCXXRttiName(Ty, OutName); + llvm::StringRef Name = OutName.str(); llvm::GlobalVariable::LinkageTypes linktype; linktype = llvm::GlobalValue::LinkOnceODRLinkage; @@ -99,9 +98,8 @@ public: return llvm::Constant::getNullValue(Int8PtrTy); llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXRtti(CGM.getMangleContext(), Ty, Out); - llvm::StringRef Name = Out.str(); + CGM.getMangleContext().mangleCXXRtti(Ty, OutName); + llvm::StringRef Name = OutName.str(); C = CGM.getModule().getGlobalVariable(Name); if (C) @@ -194,10 +192,9 @@ public: llvm::Constant *C; llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXRtti(CGM.getMangleContext(), CGM.getContext().getTagDeclType(RD), - Out); - llvm::StringRef Name = Out.str(); + CGM.getMangleContext().mangleCXXRtti(CGM.getContext().getTagDeclType(RD), + OutName); + llvm::StringRef Name = OutName.str(); llvm::GlobalVariable *GV; GV = CGM.getModule().getGlobalVariable(Name); @@ -260,13 +257,6 @@ public: return llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), f); } - llvm::Constant *BuildType2(QualType Ty) { - if (const RecordType *RT = Ty.getTypePtr()->getAs<RecordType>()) - if (const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl())) - return Buildclass_type_info(RD); - return BuildType(Ty); - } - bool DecideExtern(QualType Ty) { // For this type, see if all components are never in an anonymous namespace. 
if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) @@ -297,9 +287,8 @@ public: llvm::Constant *C; llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXRtti(CGM.getMangleContext(), Ty, Out); - llvm::StringRef Name = Out.str(); + CGM.getMangleContext().mangleCXXRtti(Ty, OutName); + llvm::StringRef Name = OutName.str(); llvm::GlobalVariable *GV; GV = CGM.getModule().getGlobalVariable(Name); @@ -338,10 +327,10 @@ public: info.push_back(BuildInt(flags)); info.push_back(BuildInt(0)); - info.push_back(BuildType2(PTy)); + info.push_back(BuildType(PTy)); if (PtrMem) - info.push_back(BuildType2(BTy)); + info.push_back(BuildType(BTy)); // We always generate these as hidden, only the name isn't hidden. return finish(info, GV, Name, true, Extern); @@ -351,9 +340,8 @@ public: llvm::Constant *C; llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXRtti(CGM.getMangleContext(), Ty, Out); - llvm::StringRef Name = Out.str(); + CGM.getMangleContext().mangleCXXRtti(Ty, OutName); + llvm::StringRef Name = OutName.str(); llvm::GlobalVariable *GV; GV = CGM.getModule().getGlobalVariable(Name); @@ -376,6 +364,11 @@ public: llvm::Constant *BuildType(QualType Ty) { const clang::Type &Type = *CGM.getContext().getCanonicalType(Ty).getTypePtr(); + + if (const RecordType *RT = Ty.getTypePtr()->getAs<RecordType>()) + if (const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl())) + return Buildclass_type_info(RD); + switch (Type.getTypeClass()) { default: { assert(0 && "typeid expression"); @@ -426,7 +419,7 @@ llvm::Constant *CodeGenModule::GenerateRtti(const CXXRecordDecl *RD) { return b.Buildclass_type_info(RD); } -llvm::Constant *CodeGenModule::GenerateRttiNonClass(QualType Ty) { +llvm::Constant *CodeGenModule::GenerateRtti(QualType Ty) { RttiBuilder b(*this); return b.BuildType(Ty); diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp index b6d7b39..bbd5462 100644 --- a/lib/CodeGen/CGStmt.cpp +++ b/lib/CodeGen/CGStmt.cpp @@ -153,9 +153,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast, } // Keep track of the current cleanup stack depth. - size_t CleanupStackDepth = CleanupEntries.size(); - bool OldDidCallStackSave = DidCallStackSave; - DidCallStackSave = false; + CleanupScope Scope(*this); for (CompoundStmt::const_body_iterator I = S.body_begin(), E = S.body_end()-GetLast; I != E; ++I) @@ -185,10 +183,6 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast, RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc); } - DidCallStackSave = OldDidCallStackSave; - - EmitCleanupBlocks(CleanupStackDepth); - return RV; } @@ -294,6 +288,10 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) { void CodeGenFunction::EmitIfStmt(const IfStmt &S) { // C99 6.8.4.1: The first substatement is executed if the expression compares // unequal to 0. The condition must be a scalar type. + CleanupScope ConditionScope(*this); + + if (S.getConditionVariable()) + EmitLocalBlockVarDecl(*S.getConditionVariable()); // If the condition constant folds and can be elided, try to avoid emitting // the condition and the dead arm of the if/else. @@ -306,8 +304,10 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) { // If the skipped block has no labels in it, just emit the executed block. // This avoids emitting dead code and simplifies the CFG substantially. 
if (!ContainsLabel(Skipped)) { - if (Executed) + if (Executed) { + CleanupScope ExecutedScope(*this); EmitStmt(Executed); + } return; } } @@ -322,14 +322,20 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) { EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock); // Emit the 'then' code. - EmitBlock(ThenBlock); - EmitStmt(S.getThen()); + EmitBlock(ThenBlock); + { + CleanupScope ThenScope(*this); + EmitStmt(S.getThen()); + } EmitBranch(ContBlock); // Emit the 'else' code if present. if (const Stmt *Else = S.getElse()) { EmitBlock(ElseBlock); - EmitStmt(Else); + { + CleanupScope ElseScope(*this); + EmitStmt(Else); + } EmitBranch(ContBlock); } @@ -347,15 +353,37 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) { // body of the loop. llvm::BasicBlock *ExitBlock = createBasicBlock("while.end"); llvm::BasicBlock *LoopBody = createBasicBlock("while.body"); + llvm::BasicBlock *CleanupBlock = 0; + llvm::BasicBlock *EffectiveExitBlock = ExitBlock; // Store the blocks to use for break and continue. BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader)); + // C++ [stmt.while]p2: + // When the condition of a while statement is a declaration, the + // scope of the variable that is declared extends from its point + // of declaration (3.3.2) to the end of the while statement. + // [...] + // The object created in a condition is destroyed and created + // with each iteration of the loop. + CleanupScope ConditionScope(*this); + + if (S.getConditionVariable()) { + EmitLocalBlockVarDecl(*S.getConditionVariable()); + + // If this condition variable requires cleanups, create a basic + // block to handle those cleanups. + if (ConditionScope.requiresCleanups()) { + CleanupBlock = createBasicBlock("while.cleanup"); + EffectiveExitBlock = CleanupBlock; + } + } + // Evaluate the conditional in the while header. C99 6.8.5.1: The // evaluation of the controlling expression takes place before each // execution of the loop body. llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); - + // while(1) is common, avoid extra exit blocks. Be sure // to correctly handle break/continue though. bool EmitBoolCondBranch = true; @@ -365,23 +393,39 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) { // As long as the condition is true, go to the loop body. if (EmitBoolCondBranch) - Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); - + Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock); + // Emit the loop body. - EmitBlock(LoopBody); - EmitStmt(S.getBody()); + { + CleanupScope BodyScope(*this); + EmitBlock(LoopBody); + EmitStmt(S.getBody()); + } BreakContinueStack.pop_back(); - // Cycle to the condition. - EmitBranch(LoopHeader); + if (CleanupBlock) { + // If we have a cleanup block, jump there to perform cleanups + // before looping. + EmitBranch(CleanupBlock); + + // Emit the cleanup block, performing cleanups for the condition + // and then jumping to either the loop header or the exit block. + EmitBlock(CleanupBlock); + ConditionScope.ForceCleanup(); + Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock); + } else { + // Cycle to the condition. + EmitBranch(LoopHeader); + } // Emit the exit block. EmitBlock(ExitBlock, true); + // The LoopHeader typically is just a branch if we skipped emitting // a branch, try to erase it. 
- if (!EmitBoolCondBranch) + if (!EmitBoolCondBranch && !CleanupBlock) SimplifyForwardingBlocks(LoopHeader); } @@ -435,6 +479,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) { void CodeGenFunction::EmitForStmt(const ForStmt &S) { // FIXME: What do we do if the increment (f.e.) contains a stmt expression, // which contains a continue/break? + CleanupScope ForScope(*this); // Evaluate the first part before the loop. if (S.getInit()) @@ -443,18 +488,34 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) { // Start the loop with a block that tests the condition. llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); llvm::BasicBlock *AfterFor = createBasicBlock("for.end"); - + llvm::BasicBlock *IncBlock = 0; + llvm::BasicBlock *CondCleanup = 0; + llvm::BasicBlock *EffectiveExitBlock = AfterFor; EmitBlock(CondBlock); - // Evaluate the condition if present. If not, treat it as a - // non-zero-constant according to 6.8.5.3p2, aka, true. + // Create a cleanup scope for the condition variable cleanups. + CleanupScope ConditionScope(*this); + + llvm::Value *BoolCondVal = 0; if (S.getCond()) { + // If the for statement has a condition scope, emit the local variable + // declaration. + if (S.getConditionVariable()) { + EmitLocalBlockVarDecl(*S.getConditionVariable()); + + if (ConditionScope.requiresCleanups()) { + CondCleanup = createBasicBlock("for.cond.cleanup"); + EffectiveExitBlock = CondCleanup; + } + } + // As long as the condition is true, iterate the loop. llvm::BasicBlock *ForBody = createBasicBlock("for.body"); // C99 6.8.5p2/p4: The first substatement is executed if the expression // compares unequal to 0. The condition must be a scalar type. - EmitBranchOnBoolExpr(S.getCond(), ForBody, AfterFor); + BoolCondVal = EvaluateExprAsBool(S.getCond()); + Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock); EmitBlock(ForBody); } else { @@ -466,7 +527,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) { // condition as the continue block. llvm::BasicBlock *ContinueBlock; if (S.getInc()) - ContinueBlock = createBasicBlock("for.inc"); + ContinueBlock = IncBlock = createBasicBlock("for.inc"); else ContinueBlock = CondBlock; @@ -479,18 +540,34 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) { DI->setLocation(S.getSourceRange().getBegin()); DI->EmitRegionStart(CurFn, Builder); } - EmitStmt(S.getBody()); + + { + // Create a separate cleanup scope for the body, in case it is not + // a compound statement. + CleanupScope BodyScope(*this); + EmitStmt(S.getBody()); + } BreakContinueStack.pop_back(); // If there is an increment, emit it next. if (S.getInc()) { - EmitBlock(ContinueBlock); + EmitBlock(IncBlock); EmitStmt(S.getInc()); } // Finally, branch back up to the condition for the next iteration. - EmitBranch(CondBlock); + if (CondCleanup) { + // Branch to the cleanup block. + EmitBranch(CondCleanup); + + // Emit the cleanup block, which branches back to the loop body or + // outside of the for statement once it is done. 
+ EmitBlock(CondCleanup); + ConditionScope.ForceCleanup(); + Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor); + } else + EmitBranch(CondBlock); if (DI) { DI->setLocation(S.getSourceRange().getEnd()); DI->EmitRegionEnd(CurFn, Builder); @@ -686,6 +763,11 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) { } void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { + CleanupScope ConditionScope(*this); + + if (S.getConditionVariable()) + EmitLocalBlockVarDecl(*S.getConditionVariable()); + llvm::Value *CondV = EmitScalarExpr(S.getCond()); // Handle nested switch statements. diff --git a/lib/CodeGen/CGCXXTemp.cpp b/lib/CodeGen/CGTemporaries.cpp index 4768556..5cfc7ef 100644 --- a/lib/CodeGen/CGCXXTemp.cpp +++ b/lib/CodeGen/CGTemporaries.cpp @@ -1,4 +1,4 @@ -//===--- CGCXXTemp.cpp - Emit LLVM Code for C++ temporaries ---------------===// +//===--- CGTemporaries.cpp - Emit LLVM Code for C++ temporaries -----------===// // // The LLVM Compiler Infrastructure // @@ -23,7 +23,7 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary, // Check if temporaries need to be conditional. If so, we'll create a // condition boolean, initialize it to 0 and - if (!ConditionalTempDestructionStack.empty()) { + if (ConditionalBranchLevel != 0) { CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond"); // Initialize it to false. This initialization takes place right after @@ -141,23 +141,3 @@ LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue( return LV; } - -void -CodeGenFunction::PushConditionalTempDestruction() { - // Store the current number of live temporaries. - ConditionalTempDestructionStack.push_back(LiveTemporaries.size()); -} - -void CodeGenFunction::PopConditionalTempDestruction() { - size_t NumLiveTemporaries = ConditionalTempDestructionStack.back(); - ConditionalTempDestructionStack.pop_back(); - - // Pop temporaries. - while (LiveTemporaries.size() > NumLiveTemporaries) { - assert(LiveTemporaries.back().CondPtr && - "Conditional temporary must have a cond ptr!"); - - PopCXXTemporary(); - } -} - diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp index 9be1a3b..715aa4c 100644 --- a/lib/CodeGen/CGVtable.cpp +++ b/lib/CodeGen/CGVtable.cpp @@ -13,13 +13,15 @@ #include "CodeGenModule.h" #include "CodeGenFunction.h" - +#include "clang/AST/CXXInheritance.h" #include "clang/AST/RecordLayout.h" +#include "llvm/ADT/DenseSet.h" #include <cstdio> using namespace clang; using namespace CodeGen; +namespace { class VtableBuilder { public: /// Index_t - Vtable index type. @@ -52,54 +54,110 @@ private: llvm::DenseMap<GlobalDecl, Index_t> NonVirtualOffset; llvm::DenseMap<const CXXRecordDecl *, Index_t> VBIndex; - typedef llvm::DenseMap<GlobalDecl, int> Pures_t; - Pures_t Pures; - typedef std::pair<Index_t, Index_t> CallOffset; - typedef llvm::DenseMap<GlobalDecl, CallOffset> Thunks_t; - Thunks_t Thunks; - typedef llvm::DenseMap<GlobalDecl, - std::pair<std::pair<CallOffset, CallOffset>, - CanQualType> > CovariantThunks_t; - CovariantThunks_t CovariantThunks; + /// PureVirtualFunction - Points to __cxa_pure_virtual. + llvm::Constant *PureVirtualFn; + + /// Thunk - Represents a single thunk. + struct Thunk { + Thunk() + : Index(0) { } + + Thunk(uint64_t Index, const ThunkAdjustment &Adjustment) + : Index(Index), Adjustment(Adjustment) { } + + /// Index - The index in the vtable. + uint64_t Index; + + /// Adjustment - The thunk adjustment. + ThunkAdjustment Adjustment; + }; + + /// Thunks - The thunks in a vtable. 
+ typedef llvm::DenseMap<GlobalDecl, Thunk> ThunksMapTy; + ThunksMapTy Thunks; + + /// CovariantThunk - Represents a single covariant thunk. + struct CovariantThunk { + CovariantThunk() + : Index(0) { } + + CovariantThunk(uint64_t Index, const ThunkAdjustment &ThisAdjustment, + const ThunkAdjustment &ReturnAdjustment, + CanQualType ReturnType) + : Index(Index), Adjustment(ThisAdjustment, ReturnAdjustment), + ReturnType(ReturnType) { } + + // Index - The index in the vtable. + uint64_t Index; + + /// Adjustment - The covariant thunk adjustment. + CovariantThunkAdjustment Adjustment; + + /// ReturnType - The return type of the function. + CanQualType ReturnType; + }; + + /// CovariantThunks - The covariant thunks in a vtable. + typedef llvm::DenseMap<GlobalDecl, CovariantThunk> CovariantThunksMapTy; + CovariantThunksMapTy CovariantThunks; + + /// PureVirtualMethods - Pure virtual methods. + typedef llvm::DenseSet<GlobalDecl> PureVirtualMethodsSetTy; + PureVirtualMethodsSetTy PureVirtualMethods; + std::vector<Index_t> VCalls; typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t; - // CtorVtable - Used to hold the AddressPoints (offsets) into the built vtable - // for use in computing the initializers for the VTT. - llvm::DenseMap<CtorVtable_t, int64_t> &AddressPoints; + // subAddressPoints - Used to hold the AddressPoints (offsets) into the built + // vtable for use in computing the initializers for the VTT. + llvm::DenseMap<CtorVtable_t, int64_t> &subAddressPoints; typedef CXXRecordDecl::method_iterator method_iter; const bool Extern; const uint32_t LLVMPointerWidth; Index_t extra; typedef std::vector<std::pair<const CXXRecordDecl *, int64_t> > Path_t; - llvm::Constant *cxa_pure; + static llvm::DenseMap<CtorVtable_t, int64_t>& + AllocAddressPoint(CodeGenModule &cgm, const CXXRecordDecl *l, + const CXXRecordDecl *c) { + CodeGenModule::AddrMap_t *&oref = cgm.AddressPoints[l]; + if (oref == 0) + oref = new CodeGenModule::AddrMap_t; + + llvm::DenseMap<CtorVtable_t, int64_t> *&ref = (*oref)[c]; + if (ref == 0) + ref = new llvm::DenseMap<CtorVtable_t, int64_t>; + return *ref; + } + + /// getPureVirtualFn - Return the __cxa_pure_virtual function. + llvm::Constant* getPureVirtualFn() { + if (!PureVirtualFn) { + const llvm::FunctionType *Ty = + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), + /*isVarArg=*/false); + PureVirtualFn = wrap(CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual")); + } + + return PureVirtualFn; + } + public: VtableBuilder(std::vector<llvm::Constant *> &meth, const CXXRecordDecl *c, const CXXRecordDecl *l, uint64_t lo, CodeGenModule &cgm) : methods(meth), Class(c), LayoutClass(l), LayoutOffset(lo), BLayout(cgm.getContext().getASTRecordLayout(l)), rtti(cgm.GenerateRttiRef(c)), VMContext(cgm.getModule().getContext()), - CGM(cgm), AddressPoints(*new llvm::DenseMap<CtorVtable_t, int64_t>), + CGM(cgm), PureVirtualFn(0),subAddressPoints(AllocAddressPoint(cgm, l, c)), Extern(!l->isInAnonymousNamespace()), - LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0)) { + LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0)) { Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); - - // Calculate pointer for ___cxa_pure_virtual. 
- const llvm::FunctionType *FTy; - std::vector<const llvm::Type*> ArgTys; - const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); - FTy = llvm::FunctionType::get(ResultType, ArgTys, false); - cxa_pure = wrap(CGM.CreateRuntimeFunction(FTy, "__cxa_pure_virtual")); } llvm::DenseMap<GlobalDecl, Index_t> &getIndex() { return Index; } llvm::DenseMap<const CXXRecordDecl *, Index_t> &getVBIndex() { return VBIndex; } - llvm::DenseMap<CtorVtable_t, int64_t> *getAddressPoints() - { return &AddressPoints; } - llvm::Constant *wrap(Index_t i) { llvm::Constant *m; m = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), i); @@ -147,8 +205,6 @@ public: SeenVBase.clear(); } - Index_t VBlookup(CXXRecordDecl *D, CXXRecordDecl *B); - Index_t getNVOffset_1(const CXXRecordDecl *D, const CXXRecordDecl *B, Index_t Offset = 0) { @@ -194,7 +250,7 @@ public: CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl()); CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl()); if (D != Class) - return VBlookup(D, B); + return CGM.getVtableInfo().getVirtualBaseOffsetIndex(D, B); llvm::DenseMap<const CXXRecordDecl *, Index_t>::iterator i; i = VBIndex.find(B); if (i != VBIndex.end()) @@ -242,22 +298,23 @@ public: CanQualType oret = CGM.getContext().getCanonicalType(nc_oret); QualType nc_ret = MD->getType()->getAs<FunctionType>()->getResultType(); CanQualType ret = CGM.getContext().getCanonicalType(nc_ret); - CallOffset ReturnOffset = std::make_pair(0, 0); + ThunkAdjustment ReturnAdjustment; if (oret != ret) { // FIXME: calculate offsets for covariance - if (CovariantThunks.count(OMD)) { - oret = CovariantThunks[OMD].second; - CovariantThunks.erase(OMD); + CovariantThunksMapTy::iterator i = CovariantThunks.find(OMD); + if (i != CovariantThunks.end()) { + oret = i->second.ReturnType; + CovariantThunks.erase(i); } // FIXME: Double check oret Index_t nv = getNVOffset(oret, ret)/8; - ReturnOffset = std::make_pair(nv, getVbaseOffset(oret, ret)); + ReturnAdjustment = ThunkAdjustment(nv, getVbaseOffset(oret, ret)); } Index[GD] = i; submethods[i] = m; if (isPure) - Pures[GD] = 1; - Pures.erase(OGD); + PureVirtualMethods.insert(GD); + PureVirtualMethods.erase(OGD); Thunks.erase(OGD); if (MorallyVirtual || VCall.count(OGD)) { Index_t &idx = VCall[OGD]; @@ -278,35 +335,38 @@ public: (int)VCalls[idx-1], Class->getNameAsCString())); } VCall[GD] = idx; - int64_t O = NonVirtualOffset[GD]; - int v = -((idx+extra+2)*LLVMPointerWidth/8); + int64_t NonVirtualAdjustment = NonVirtualOffset[GD]; + int64_t VirtualAdjustment = + -((idx + extra + 2) * LLVMPointerWidth / 8); + // Optimize out virtual adjustments of 0. if (VCalls[idx-1] == 0) - v = 0; - CallOffset ThisOffset = std::make_pair(O, v); + VirtualAdjustment = 0; + + ThunkAdjustment ThisAdjustment(NonVirtualAdjustment, + VirtualAdjustment); + // FIXME: Do we always have to build a covariant thunk to save oret, // which is the containing virtual base class? 
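Illustrative note (not part of the diff): a CovariantThunk with a non-empty ReturnAdjustment, as handled above, arises when an override has a covariant return type whose conversion back to the base's return type crosses a non-zero base offset. A minimal self-contained example:

  struct Pad  { virtual ~Pad() {} int pad; };
  struct Base { virtual ~Base() {} virtual Base *self() { return this; } };

  // Pad is the primary base, so the Base subobject sits at a non-zero
  // offset; converting the Derived* returned by Derived::self() back to
  // Base* needs exactly the kind of ReturnAdjustment recorded above.
  struct Derived : Pad, Base {
    virtual Derived *self() { return this; }   // covariant override
  };

  int main() {
    Derived d;
    Base *b = &d;
    Base *r = b->self();    // the call lands on the covariant thunk
    return r == b ? 0 : 1;  // the adjusted result equals b
  }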
- if (ReturnOffset.first || ReturnOffset.second) - CovariantThunks[GD] = std::make_pair(std::make_pair(ThisOffset, - ReturnOffset), - oret); - else if (!isPure && (ThisOffset.first || ThisOffset.second)) - Thunks[GD] = ThisOffset; + if (!ReturnAdjustment.isEmpty()) { + CovariantThunks[GD] = + CovariantThunk(i, ThisAdjustment, ReturnAdjustment, oret); + } else if (!isPure && !ThisAdjustment.isEmpty()) + Thunks[GD] = Thunk(i, ThisAdjustment); return true; } // FIXME: finish off - int64_t O = VCallOffset[OGD] - OverrideOffset/8; + int64_t NonVirtualAdjustment = VCallOffset[OGD] - OverrideOffset/8; - if (O || ReturnOffset.first || ReturnOffset.second) { - CallOffset ThisOffset = std::make_pair(O, 0); + if (NonVirtualAdjustment || !ReturnAdjustment.isEmpty()) { + ThunkAdjustment ThisAdjustment(NonVirtualAdjustment, 0); - if (ReturnOffset.first || ReturnOffset.second) - CovariantThunks[GD] = std::make_pair(std::make_pair(ThisOffset, - ReturnOffset), - oret); - else if (!isPure) - Thunks[GD] = ThisOffset; + if (!ReturnAdjustment.isEmpty()) { + CovariantThunks[GD] = + CovariantThunk(i, ThisAdjustment, ReturnAdjustment, oret); + } else if (!isPure) + Thunks[GD] = Thunk(i, ThisAdjustment); } return true; } @@ -316,40 +376,39 @@ public: } void InstallThunks() { - for (Thunks_t::iterator i = Thunks.begin(), e = Thunks.end(); + for (ThunksMapTy::const_iterator i = Thunks.begin(), e = Thunks.end(); i != e; ++i) { GlobalDecl GD = i->first; const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - assert(!MD->isPure() && "Trying to thunk a pure"); - Index_t idx = Index[GD]; - Index_t nv_O = i->second.first; - Index_t v_O = i->second.second; - submethods[idx] = CGM.BuildThunk(MD, Extern, nv_O, v_O); + assert(!MD->isPure() && "Can't thunk pure virtual methods!"); + + const Thunk& Thunk = i->second; + assert(Thunk.Index == Index[GD] && "Thunk index mismatch!"); + + submethods[Thunk.Index] = CGM.BuildThunk(MD, Extern, Thunk.Adjustment); } Thunks.clear(); - for (CovariantThunks_t::iterator i = CovariantThunks.begin(), - e = CovariantThunks.end(); - i != e; ++i) { + + for (CovariantThunksMapTy::const_iterator i = CovariantThunks.begin(), + e = CovariantThunks.end(); i != e; ++i) { GlobalDecl GD = i->first; const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); if (MD->isPure()) continue; - Index_t idx = Index[GD]; - Index_t nv_t = i->second.first.first.first; - Index_t v_t = i->second.first.first.second; - Index_t nv_r = i->second.first.second.first; - Index_t v_r = i->second.first.second.second; - submethods[idx] = CGM.BuildCovariantThunk(MD, Extern, nv_t, v_t, nv_r, - v_r); + + const CovariantThunk &Thunk = i->second; + assert(Thunk.Index == Index[GD] && "Thunk index mismatch!"); + submethods[Thunk.Index] = + CGM.BuildCovariantThunk(MD, Extern, Thunk.Adjustment); } CovariantThunks.clear(); - for (Pures_t::iterator i = Pures.begin(), e = Pures.end(); - i != e; ++i) { - GlobalDecl GD = i->first; - Index_t idx = Index[GD]; - submethods[idx] = cxa_pure; + + for (PureVirtualMethodsSetTy::iterator i = PureVirtualMethods.begin(), + e = PureVirtualMethods.end(); i != e; ++i) { + GlobalDecl GD = *i; + submethods[Index[GD]] = getPureVirtualFn(); } - Pures.clear(); + PureVirtualMethods.clear(); } llvm::Constant *WrapAddrOf(GlobalDecl GD) { @@ -358,10 +417,7 @@ public: if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) return wrap(CGM.GetAddrOfCXXDestructor(Dtor, GD.getDtorType())); - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - const llvm::Type *Ty = - 
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), - FPT->isVariadic()); + const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD); return wrap(CGM.GetAddrOfFunction(MD, Ty)); } @@ -397,7 +453,7 @@ public: } void AddMethod(const GlobalDecl GD, bool MorallyVirtual, Index_t Offset, - bool ForVirtualBase, int64_t CurrentVBaseOffset) { + int64_t CurrentVBaseOffset) { llvm::Constant *m = WrapAddrOf(GD); // If we can find a previously allocated slot for this, reuse it. @@ -413,7 +469,7 @@ public: D1(printf(" vfn for %s at %d\n", MD->getNameAsString().c_str(), (int)Index[GD])); if (MD->isPure()) - Pures[GD] = 1; + PureVirtualMethods.insert(GD); if (MorallyVirtual) { VCallOffset[GD] = Offset/8; Index_t &idx = VCall[GD]; @@ -429,8 +485,7 @@ public: } void AddMethods(const CXXRecordDecl *RD, bool MorallyVirtual, - Index_t Offset, bool RDisVirtualBase, - int64_t CurrentVBaseOffset) { + Index_t Offset, int64_t CurrentVBaseOffset) { for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me; ++mi) { const CXXMethodDecl *MD = *mi; @@ -441,12 +496,11 @@ public: // For destructors, add both the complete and the deleting destructor // to the vtable. AddMethod(GlobalDecl(DD, Dtor_Complete), MorallyVirtual, Offset, - RDisVirtualBase, CurrentVBaseOffset); + CurrentVBaseOffset); AddMethod(GlobalDecl(DD, Dtor_Deleting), MorallyVirtual, Offset, - RDisVirtualBase, CurrentVBaseOffset); - } else - AddMethod(MD, MorallyVirtual, Offset, RDisVirtualBase, CurrentVBaseOffset); + } else + AddMethod(MD, MorallyVirtual, Offset, CurrentVBaseOffset); } } @@ -495,7 +549,7 @@ public: D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n", RD->getNameAsCString(), Class->getNameAsCString(), LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint)); - AddressPoints[std::make_pair(RD, Offset)] = AddressPoint; + subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint; // Now also add the address point for all our primary bases. while (1) { @@ -511,7 +565,7 @@ public: D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n", RD->getNameAsCString(), Class->getNameAsCString(), LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint)); - AddressPoints[std::make_pair(RD, Offset)] = AddressPoint; + subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint; } } @@ -572,7 +626,7 @@ public: void Primaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset, bool updateVBIndex, Index_t current_vbindex, - bool RDisVirtualBase, int64_t CurrentVBaseOffset) { + int64_t CurrentVBaseOffset) { if (!RD->isDynamicClass()) return; @@ -591,21 +645,20 @@ public: if (!PrimaryBaseWasVirtual) Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset, - updateVBIndex, current_vbindex, PrimaryBaseWasVirtual, - BaseCurrentVBaseOffset); + updateVBIndex, current_vbindex, BaseCurrentVBaseOffset); } D1(printf(" doing vcall entries for %s most derived %s\n", RD->getNameAsCString(), Class->getNameAsCString())); // And add the virtuals for the class to the primary vtable. 
- AddMethods(RD, MorallyVirtual, Offset, RDisVirtualBase, CurrentVBaseOffset); + AddMethods(RD, MorallyVirtual, Offset, CurrentVBaseOffset); } void VBPrimaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset, bool updateVBIndex, Index_t current_vbindex, bool RDisVirtualBase, int64_t CurrentVBaseOffset, - bool bottom=false) { + bool bottom) { if (!RD->isDynamicClass()) return; @@ -626,7 +679,7 @@ public: VBPrimaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset, updateVBIndex, current_vbindex, PrimaryBaseWasVirtual, - BaseCurrentVBaseOffset); + BaseCurrentVBaseOffset, false); } D1(printf(" doing vbase entries for %s most derived %s\n", @@ -635,7 +688,7 @@ public: if (RDisVirtualBase || bottom) { Primaries(RD, MorallyVirtual, Offset, updateVBIndex, current_vbindex, - RDisVirtualBase, CurrentVBaseOffset); + CurrentVBaseOffset); } } @@ -718,30 +771,223 @@ public: } }; +} + +/// TypeConversionRequiresAdjustment - Returns whether conversion from a +/// derived type to a base type requires adjustment. +static bool +TypeConversionRequiresAdjustment(ASTContext &Ctx, + const CXXRecordDecl *DerivedDecl, + const CXXRecordDecl *BaseDecl) { + CXXBasePaths Paths(/*FindAmbiguities=*/false, + /*RecordPaths=*/true, /*DetectVirtual=*/true); + if (!const_cast<CXXRecordDecl *>(DerivedDecl)-> + isDerivedFrom(const_cast<CXXRecordDecl *>(BaseDecl), Paths)) { + assert(false && "Class must be derived from the passed in base class!"); + return false; + } + + // If we found a virtual base we always want to require adjustment. + if (Paths.getDetectedVirtual()) + return true; + + const CXXBasePath &Path = Paths.front(); + + for (size_t Start = 0, End = Path.size(); Start != End; ++Start) { + const CXXBasePathElement &Element = Path[Start]; + + // Check the base class offset. + const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(Element.Class); + + const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>(); + const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl()); + + if (Layout.getBaseClassOffset(Base) != 0) { + // This requires an adjustment. + return true; + } + } + + return false; +} + +static bool +TypeConversionRequiresAdjustment(ASTContext &Ctx, + QualType DerivedType, QualType BaseType) { + // Canonicalize the types. + QualType CanDerivedType = Ctx.getCanonicalType(DerivedType); + QualType CanBaseType = Ctx.getCanonicalType(BaseType); + + assert(CanDerivedType->getTypeClass() == CanBaseType->getTypeClass() && + "Types must have same type class!"); + + if (CanDerivedType == CanBaseType) { + // No adjustment needed. + return false; + } + + if (const ReferenceType *RT = dyn_cast<ReferenceType>(CanDerivedType)) { + CanDerivedType = RT->getPointeeType(); + CanBaseType = cast<ReferenceType>(CanBaseType)->getPointeeType(); + } else if (const PointerType *PT = dyn_cast<PointerType>(CanDerivedType)) { + CanDerivedType = PT->getPointeeType(); + CanBaseType = cast<PointerType>(CanBaseType)->getPointeeType(); + } else { + assert(false && "Unexpected return type!"); + } + + if (CanDerivedType == CanBaseType) { + // No adjustment needed. 
+ return false; + } + + const CXXRecordDecl *DerivedDecl = + cast<CXXRecordDecl>(cast<RecordType>(CanDerivedType)->getDecl()); + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(cast<RecordType>(CanBaseType)->getDecl()); + + return TypeConversionRequiresAdjustment(Ctx, DerivedDecl, BaseDecl); +} + +void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) { + + // Itanium C++ ABI 2.5.2: + // The order of the virtual function pointers in a virtual table is the + // order of declaration of the corresponding member functions in the class. + // + // There is an entry for any virtual function declared in a class, + // whether it is a new function or overrides a base class function, + // unless it overrides a function from the primary base, and conversion + // between their return types does not require an adjustment. + + int64_t CurrentIndex = 0; + + const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); + const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); + + if (PrimaryBase) { + assert(PrimaryBase->isDefinition() && + "Should have the definition decl of the primary base!"); -VtableBuilder::Index_t VtableBuilder::VBlookup(CXXRecordDecl *D, - CXXRecordDecl *B) { - return CGM.getVtableInfo().getVirtualBaseOffsetIndex(D, B); + // Since the record decl shares its vtable pointer with the primary base + // we need to start counting at the end of the primary base's vtable. + CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase); + } + + const CXXDestructorDecl *ImplicitVirtualDtor = 0; + + for (CXXRecordDecl::method_iterator i = RD->method_begin(), + e = RD->method_end(); i != e; ++i) { + const CXXMethodDecl *MD = *i; + + // We only want virtual methods. + if (!MD->isVirtual()) + continue; + + bool ShouldAddEntryForMethod = true; + + // Check if this method overrides a method in the primary base. + for (CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(), + e = MD->end_overridden_methods(); i != e; ++i) { + const CXXMethodDecl *OverriddenMD = *i; + const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent(); + assert(OverriddenMD->isCanonicalDecl() && + "Should have the canonical decl of the overridden RD!"); + + if (OverriddenRD == PrimaryBase) { + // Check if converting from the return type of the method to the + // return type of the overridden method requires conversion. + QualType ReturnType = + MD->getType()->getAs<FunctionType>()->getResultType(); + QualType OverriddenReturnType = + OverriddenMD->getType()->getAs<FunctionType>()->getResultType(); + + if (!TypeConversionRequiresAdjustment(CGM.getContext(), + ReturnType, OverriddenReturnType)) { + // This index is shared between the index in the vtable of the primary + // base class. + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + const CXXDestructorDecl *OverriddenDD = + cast<CXXDestructorDecl>(OverriddenMD); + + // Add both the complete and deleting entries. + MethodVtableIndices[GlobalDecl(DD, Dtor_Complete)] = + getMethodVtableIndex(GlobalDecl(OverriddenDD, Dtor_Complete)); + MethodVtableIndices[GlobalDecl(DD, Dtor_Deleting)] = + getMethodVtableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting)); + } else { + MethodVtableIndices[MD] = getMethodVtableIndex(OverriddenMD); + } + + // We don't need to add an entry for this method. 
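Illustrative note (not part of the diff): ComputeMethodVtableIndices above implements the Itanium C++ ABI 2.5.2 numbering quoted in its comment, under which an override of a primary-base method reuses that base's slot unless its return type needs adjustment. For example:

  struct A {
    virtual void f() {}   // vtable index 0 in A
    virtual void g() {}   // vtable index 1
  };

  struct B : A {          // A is B's primary base
    virtual void g() {}   // shares index 1 with A::g -- no new slot
    virtual void h() {}   // first genuinely new virtual: index 2
  };

The ImplicitVirtualDtor handling that follows covers the related rule that an implicitly-defined virtual destructor's entries are appended after the declared virtual function pointers.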
+ ShouldAddEntryForMethod = false; + break; + } + } + } + + if (!ShouldAddEntryForMethod) + continue; + + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { + if (MD->isImplicit()) { + assert(!ImplicitVirtualDtor && + "Did already see an implicit virtual dtor!"); + ImplicitVirtualDtor = DD; + continue; + } + + // Add the complete dtor. + MethodVtableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++; + + // Add the deleting dtor. + MethodVtableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++; + } else { + // Add the entry. + MethodVtableIndices[MD] = CurrentIndex++; + } + } + + if (ImplicitVirtualDtor) { + // Itanium C++ ABI 2.5.2: + // If a class has an implicitly-defined virtual destructor, + // its entries come after the declared virtual function pointers. + + // Add the complete dtor. + MethodVtableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] = + CurrentIndex++; + + // Add the deleting dtor. + MethodVtableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] = + CurrentIndex++; + } + + NumVirtualFunctionPointers[RD] = CurrentIndex; } -int64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) { +uint64_t CGVtableInfo::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) { + llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I = + NumVirtualFunctionPointers.find(RD); + if (I != NumVirtualFunctionPointers.end()) + return I->second; + + ComputeMethodVtableIndices(RD); + + I = NumVirtualFunctionPointers.find(RD); + assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!"); + return I->second; +} + +uint64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) { MethodVtableIndicesTy::iterator I = MethodVtableIndices.find(GD); if (I != MethodVtableIndices.end()) return I->second; const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent(); - - std::vector<llvm::Constant *> methods; - // FIXME: This seems expensive. Can we do a partial job to get - // just this data. - VtableBuilder b(methods, RD, RD, 0, CGM); - D1(printf("vtable %s\n", RD->getNameAsCString())); - b.GenerateVtableForBase(RD); - b.GenerateVtableForVBases(RD); - - MethodVtableIndices.insert(b.getIndex().begin(), - b.getIndex().end()); - + + ComputeMethodVtableIndices(RD); + I = MethodVtableIndices.find(GD); assert(I != MethodVtableIndices.end() && "Did not find index!"); return I->second; @@ -782,22 +1028,25 @@ llvm::Constant *CodeGenModule::GenerateVtable(const CXXRecordDecl *LayoutClass, const CXXRecordDecl *RD, uint64_t Offset) { llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); if (LayoutClass != RD) - mangleCXXCtorVtable(getMangleContext(), LayoutClass, Offset/8, RD, Out); + getMangleContext().mangleCXXCtorVtable(LayoutClass, Offset/8, RD, OutName); else - mangleCXXVtable(getMangleContext(), RD, Out); - llvm::StringRef Name = Out.str(); + getMangleContext().mangleCXXVtable(RD, OutName); + llvm::StringRef Name = OutName.str(); std::vector<llvm::Constant *> methods; llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0); int64_t AddressPoint; llvm::GlobalVariable *GV = getModule().getGlobalVariable(Name); - if (GV && AddressPoints[LayoutClass] && !GV->isDeclaration()) + if (GV && AddressPoints[LayoutClass] && !GV->isDeclaration()) { AddressPoint=(*(*(AddressPoints[LayoutClass]))[RD])[std::make_pair(RD, Offset)]; - else { + // FIXME: We can never have 0 address point. Do this for now so gepping + // retains the same structure. Later, we'll just assert. 
+ if (AddressPoint == 0) + AddressPoint = 1; + } else { VtableBuilder b(methods, RD, LayoutClass, Offset, *this); D1(printf("vtable %s\n", RD->getNameAsCString())); @@ -807,20 +1056,14 @@ llvm::Constant *CodeGenModule::GenerateVtable(const CXXRecordDecl *LayoutClass, // then the vtables for all the virtual bases. b.GenerateVtableForVBases(RD, Offset); - CodeGenModule::AddrMap_t *&ref = AddressPoints[LayoutClass]; - if (ref == 0) - ref = new CodeGenModule::AddrMap_t; - - (*ref)[RD] = b.getAddressPoints(); - bool CreateDefinition = true; if (LayoutClass != RD) CreateDefinition = true; else { - // We have to convert it to have a record layout. - Types.ConvertTagDeclType(LayoutClass); - const CGRecordLayout &CGLayout = Types.getCGRecordLayout(LayoutClass); - if (const CXXMethodDecl *KeyFunction = CGLayout.getKeyFunction()) { + const ASTRecordLayout &Layout = + getContext().getASTRecordLayout(LayoutClass); + + if (const CXXMethodDecl *KeyFunction = Layout.getKeyFunction()) { if (!KeyFunction->getBody()) { // If there is a KeyFunction, and it isn't defined, just build a // reference to the vtable. @@ -862,6 +1105,7 @@ llvm::Constant *CodeGenModule::GenerateVtable(const CXXRecordDecl *LayoutClass, vtable = llvm::ConstantExpr::getInBoundsGetElementPtr(vtable, &AddressPointC, 1); + assert(vtable->getType() == Ptr8Ty); return vtable; } @@ -888,7 +1132,7 @@ class VTTBuilder { int64_t AddressPoint; AddressPoint = (*AddressPoints[VtblClass])[std::make_pair(RD, Offset)]; // FIXME: We can never have 0 address point. Do this for now so gepping - // retains the same structure. + // retains the same structure. Later we'll just assert. if (AddressPoint == 0) AddressPoint = 1; D1(printf("XXX address point for %s in %s layout %s at offset %d was %d\n", @@ -1034,9 +1278,8 @@ llvm::Constant *CodeGenModule::GenerateVTT(const CXXRecordDecl *RD) { return 0; llvm::SmallString<256> OutName; - llvm::raw_svector_ostream Out(OutName); - mangleCXXVTT(getMangleContext(), RD, Out); - llvm::StringRef Name = Out.str(); + getMangleContext().mangleCXXVTT(RD, OutName); + llvm::StringRef Name = OutName.str(); llvm::GlobalVariable::LinkageTypes linktype; linktype = llvm::GlobalValue::LinkOnceODRLinkage; @@ -1073,10 +1316,9 @@ llvm::Constant *CGVtableInfo::getVtable(const CXXRecordDecl *RD) { vtbl = CGM.GenerateVtable(RD, RD); bool CreateDefinition = true; - // We have to convert it to have a record layout. - CGM.getTypes().ConvertTagDeclType(RD); - const CGRecordLayout &CGLayout = CGM.getTypes().getCGRecordLayout(RD); - if (const CXXMethodDecl *KeyFunction = CGLayout.getKeyFunction()) { + + const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); + if (const CXXMethodDecl *KeyFunction = Layout.getKeyFunction()) { if (!KeyFunction->getBody()) { // If there is a KeyFunction, and it isn't defined, just build a // reference to the vtable. @@ -1096,3 +1338,31 @@ llvm::Constant *CGVtableInfo::getCtorVtable(const CXXRecordDecl *LayoutClass, uint64_t Offset) { return CGM.GenerateVtable(LayoutClass, RD, Offset); } + +void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) { + const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); + const CXXRecordDecl *RD = MD->getParent(); + + const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); + + // Get the key function. + const CXXMethodDecl *KeyFunction = Layout.getKeyFunction(); + + if (!KeyFunction) { + // If there's no key function, we don't want to emit the vtable here. + return; + } + + // Check if we have the key function. 
+ if (KeyFunction->getCanonicalDecl() != MD->getCanonicalDecl()) + return; + + // If the key function is a destructor, we only want to emit the vtable once, + // so do it for the complete destructor. + if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Complete) + return; + + // Emit the data. + GenerateClassData(RD); +} + diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h index 78ae670..5c2b74c 100644 --- a/lib/CodeGen/CGVtable.h +++ b/lib/CodeGen/CGVtable.h @@ -17,56 +17,110 @@ #include "llvm/ADT/DenseMap.h" #include "GlobalDecl.h" +namespace llvm { + class Constant; +} + namespace clang { - class CXXMethodDecl; class CXXRecordDecl; - + namespace CodeGen { class CodeGenModule; - + +/// ThunkAdjustment - Virtual and non-virtual adjustment for thunks. +class ThunkAdjustment { +public: + ThunkAdjustment(int64_t NonVirtual, int64_t Virtual) + : NonVirtual(NonVirtual), + Virtual(Virtual) { } + + ThunkAdjustment() + : NonVirtual(0), Virtual(0) { } + + // isEmpty - Return whether this thunk adjustment is empty. + bool isEmpty() const { + return NonVirtual == 0 && Virtual == 0; + } + + /// NonVirtual - The non-virtual adjustment. + int64_t NonVirtual; + + /// Virtual - The virtual adjustment. + int64_t Virtual; +}; + +/// CovariantThunkAdjustment - Adjustment of the 'this' pointer and the +/// return pointer for covariant thunks. +class CovariantThunkAdjustment { +public: + CovariantThunkAdjustment(const ThunkAdjustment &ThisAdjustment, + const ThunkAdjustment &ReturnAdjustment) + : ThisAdjustment(ThisAdjustment), ReturnAdjustment(ReturnAdjustment) { } + + CovariantThunkAdjustment() { } + + ThunkAdjustment ThisAdjustment; + ThunkAdjustment ReturnAdjustment; +}; + class CGVtableInfo { CodeGenModule &CGM; - + /// MethodVtableIndices - Contains the index (relative to the vtable address /// point) where the function pointer for a virtual function is stored. typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVtableIndicesTy; MethodVtableIndicesTy MethodVtableIndices; - + typedef std::pair<const CXXRecordDecl *, const CXXRecordDecl *> ClassPairTy; - + /// VirtualBaseClassIndicies - Contains the index into the vtable where the /// offsets for virtual bases of a class are stored. typedef llvm::DenseMap<ClassPairTy, int64_t> VirtualBaseClassIndiciesTy; VirtualBaseClassIndiciesTy VirtualBaseClassIndicies; llvm::DenseMap<const CXXRecordDecl *, llvm::Constant *> Vtables; + + /// NumVirtualFunctionPointers - Contains the number of virtual function + /// pointers in the vtable for a given record decl. + llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers; + + /// getNumVirtualFunctionPointers - Return the number of virtual function + /// pointers in the vtable for a given record decl. + uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD); + + void ComputeMethodVtableIndices(const CXXRecordDecl *RD); + + /// GenerateClassData - Generate all the class data requires to be generated + /// upon definition of a KeyFunction. This includes the vtable, the + /// rtti data structure and the VTT. + void GenerateClassData(const CXXRecordDecl *RD); + public: - CGVtableInfo(CodeGenModule &CGM) + CGVtableInfo(CodeGenModule &CGM) : CGM(CGM) { } /// getMethodVtableIndex - Return the index (relative to the vtable address - /// point) where the function pointer for the given virtual function is + /// point) where the function pointer for the given virtual function is /// stored. 
- int64_t getMethodVtableIndex(GlobalDecl GD); - + uint64_t getMethodVtableIndex(GlobalDecl GD); + /// getVirtualBaseOffsetIndex - Return the index (relative to the vtable /// address point) where the offset of the virtual base that contains the /// given Base is stored, otherwise, if no virtual base contains the given /// class, return 0. Base must be a virtual base class or an unambigious /// base. - int64_t getVirtualBaseOffsetIndex(const CXXRecordDecl *RD, + int64_t getVirtualBaseOffsetIndex(const CXXRecordDecl *RD, const CXXRecordDecl *VBase); llvm::Constant *getVtable(const CXXRecordDecl *RD); llvm::Constant *getCtorVtable(const CXXRecordDecl *RD, const CXXRecordDecl *Class, uint64_t Offset); - /// GenerateClassData - Generate all the class data requires to be generated - /// upon definition of a KeyFunction. This includes the vtable, the - /// rtti data structure and the VTT. - void GenerateClassData(const CXXRecordDecl *RD); -}; + + void MaybeEmitVtable(GlobalDecl GD); +}; + } } #endif diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt index 10884a7..9281416 100644 --- a/lib/CodeGen/CMakeLists.txt +++ b/lib/CodeGen/CMakeLists.txt @@ -3,11 +3,9 @@ set(LLVM_NO_RTTI 1) add_clang_library(clangCodeGen CGBlocks.cpp CGBuiltin.cpp - CGCXX.cpp - CGCXXClass.cpp - CGCXXExpr.cpp - CGCXXTemp.cpp CGCall.cpp + CGClass.cpp + CGCXX.cpp CGDebugInfo.cpp CGDecl.cpp CGException.cpp @@ -15,6 +13,7 @@ add_clang_library(clangCodeGen CGExprAgg.cpp CGExprComplex.cpp CGExprConstant.cpp + CGExprCXX.cpp CGExprScalar.cpp CGObjC.cpp CGObjCGNU.cpp @@ -22,6 +21,7 @@ add_clang_library(clangCodeGen CGRecordLayoutBuilder.cpp CGRtti.cpp CGStmt.cpp + CGTemporaries.cpp CGVtable.cpp CodeGenFunction.cpp CodeGenModule.cpp diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp index 475c7bf..6e0a77c 100644 --- a/lib/CodeGen/CodeGenFunction.cpp +++ b/lib/CodeGen/CodeGenFunction.cpp @@ -29,7 +29,8 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm) Builder(cgm.getModule().getContext()), DebugInfo(0), IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0), - CXXThisDecl(0) { + CXXThisDecl(0), CXXVTTDecl(0), + ConditionalBranchLevel(0) { LLVMIntTy = ConvertType(getContext().IntTy); LLVMPointerWidth = Target.getPointerWidth(0); } @@ -216,6 +217,24 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, } } +static bool NeedsVTTParameter(GlobalDecl GD) { + const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); + + // We don't have any virtual bases, just return early. + if (!MD->getParent()->getNumVBases()) + return false; + + // Check if we have a base constructor. + if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base) + return true; + + // Check if we have a base destructor. + if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) + return true; + + return false; +} + void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) { const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); @@ -235,6 +254,16 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, &getContext().Idents.get("this"), MD->getThisType(getContext())); Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType())); + + // Check if we need a VTT parameter as well. + if (NeedsVTTParameter(GD)) { + // FIXME: The comment about using a fake decl above applies here too. 
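Illustrative note (not part of the diff): NeedsVTTParameter above encodes the Itanium ABI rule that base-object constructors and destructors of classes with virtual bases receive a hidden VTT argument. A minimal hierarchy that triggers it:

  struct V { int v; };
  struct A : virtual V { };
  struct B : virtual V { };
  struct C : A, B { };    // the complete object shares a single V subobject

  int main() { C c; (void)c; return 0; }

When A's base-object constructor runs as part of constructing a C, it must not construct the shared V itself and must install construction vtables rather than A's own; the VTT passed through the extra "vtt" parameter set up below supplies those vtables.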
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy); + CXXVTTDecl = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), + &getContext().Idents.get("vtt"), T); + Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType())); + } } } @@ -317,6 +346,10 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, // Destroy the 'this' declaration. if (CXXThisDecl) CXXThisDecl->Destroy(getContext()); + + // Destroy the VTT declaration. + if (CXXVTTDecl) + CXXVTTDecl->Destroy(getContext()); } /// ContainsLabel - Return true if the statement contains a label in it. If diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h index d96c355..7f32045 100644 --- a/lib/CodeGen/CodeGenFunction.h +++ b/lib/CodeGen/CodeGenFunction.h @@ -133,17 +133,17 @@ public: /// block. CleanupBlockInfo PopCleanupBlock(); - /// CleanupScope - RAII object that will create a cleanup block and set the - /// insert point to that block. When destructed, it sets the insert point to - /// the previous block and pushes a new cleanup entry on the stack. - class CleanupScope { + /// DelayedCleanupBlock - RAII object that will create a cleanup block and set + /// the insert point to that block. When destructed, it sets the insert point + /// to the previous block and pushes a new cleanup entry on the stack. + class DelayedCleanupBlock { CodeGenFunction& CGF; llvm::BasicBlock *CurBB; llvm::BasicBlock *CleanupEntryBB; llvm::BasicBlock *CleanupExitBB; public: - CleanupScope(CodeGenFunction &cgf) + DelayedCleanupBlock(CodeGenFunction &cgf) : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()), CleanupEntryBB(CGF.createBasicBlock("cleanup")), CleanupExitBB(0) { CGF.Builder.SetInsertPoint(CleanupEntryBB); @@ -155,7 +155,7 @@ public: return CleanupExitBB; } - ~CleanupScope() { + ~DelayedCleanupBlock() { CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB); // FIXME: This is silly, move this into the builder. if (CurBB) @@ -165,6 +165,50 @@ public: } }; + /// \brief Enters a new scope for capturing cleanups, all of which will be + /// executed once the scope is exited. + class CleanupScope { + CodeGenFunction& CGF; + size_t CleanupStackDepth; + bool OldDidCallStackSave; + bool PerformCleanup; + + CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT + CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT + + public: + /// \brief Enter a new cleanup scope. + explicit CleanupScope(CodeGenFunction &CGF) + : CGF(CGF), PerformCleanup(true) + { + CleanupStackDepth = CGF.CleanupEntries.size(); + OldDidCallStackSave = CGF.DidCallStackSave; + } + + /// \brief Exit this cleanup scope, emitting any accumulated + /// cleanups. + ~CleanupScope() { + if (PerformCleanup) { + CGF.DidCallStackSave = OldDidCallStackSave; + CGF.EmitCleanupBlocks(CleanupStackDepth); + } + } + + /// \brief Determine whether this scope requires any cleanups. + bool requiresCleanups() const { + return CGF.CleanupEntries.size() > CleanupStackDepth; + } + + /// \brief Force the emission of cleanups now, instead of waiting + /// until this object is destroyed. + void ForceCleanup() { + assert(PerformCleanup && "Already forced cleanup"); + CGF.DidCallStackSave = OldDidCallStackSave; + CGF.EmitCleanupBlocks(CleanupStackDepth); + PerformCleanup = false; + } + }; + /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup /// blocks that have been added. void EmitCleanupBlocks(size_t OldCleanupStackSize); @@ -176,27 +220,31 @@ public: /// this behavior for branches? 
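Illustrative note (not part of the diff): the new CleanupScope above is a depth-recording RAII guard: it remembers the size of the cleanup stack on entry and emits everything pushed since then on exit, unless ForceCleanup() already did. A stand-alone sketch of the same pattern, with hypothetical names and a plain vector standing in for CleanupEntries:

  #include <cstdio>
  #include <vector>

  typedef void (*CleanupFn)();
  typedef std::vector<CleanupFn> CleanupStack;

  static void destroyConditionTemporary() { std::puts("destroy condition temporary"); }

  class ScopeSketch {
    CleanupStack &Stack;
    size_t Depth;               // cleanup-stack depth recorded on entry
    bool Pending;
  public:
    explicit ScopeSketch(CleanupStack &S)
      : Stack(S), Depth(S.size()), Pending(true) {}
    void ForceCleanup() { run(); Pending = false; }
    ~ScopeSketch() { if (Pending) run(); }
  private:
    void run() {                // pop and run everything pushed since entry
      while (Stack.size() > Depth) {
        CleanupFn Fn = Stack.back();
        Stack.pop_back();
        Fn();
      }
    }
  };

  int main() {
    CleanupStack S;
    {
      ScopeSketch Scope(S);
      S.push_back(&destroyConditionTemporary);
    }                           // cleanup runs here, as in ~CleanupScope
    return 0;
  }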
void EmitBranchThroughCleanup(llvm::BasicBlock *Dest); - /// PushConditionalTempDestruction - Should be called before a conditional - /// part of an expression is emitted. For example, before the RHS of the - /// expression below is emitted: + /// StartConditionalBranch - Should be called before a conditional part of an + /// expression is emitted. For example, before the RHS of the expression below + /// is emitted: /// /// b && f(T()); /// - /// This is used to make sure that any temporaryes created in the conditional + /// This is used to make sure that any temporaries created in the conditional /// branch are only destroyed if the branch is taken. - void PushConditionalTempDestruction(); + void StartConditionalBranch() { + ++ConditionalBranchLevel; + } - /// PopConditionalTempDestruction - Should be called after a conditional - /// part of an expression has been emitted. - void PopConditionalTempDestruction(); + /// FinishConditionalBranch - Should be called after a conditional part of an + /// expression has been emitted. + void FinishConditionalBranch() { + --ConditionalBranchLevel; + } private: CGDebugInfo *DebugInfo; - /// IndirectBranch - The first time an indirect goto is seen we create a - /// block with an indirect branch. Every time we see the address of a label - /// taken, we add the label to the indirect goto. Every subsequent indirect - /// goto is codegen'd as a jump to the IndirectBranch's basic block. + /// IndirectBranch - The first time an indirect goto is seen we create a block + /// with an indirect branch. Every time we see the address of a label taken, + /// we add the label to the indirect goto. Every subsequent indirect goto is + /// codegen'd as a jump to the IndirectBranch's basic block. llvm::IndirectBrInst *IndirectBranch; /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C @@ -269,10 +317,15 @@ private: /// BlockScopes - Map of which "cleanup scope" scope basic blocks have. BlockScopeMap BlockScopes; - /// CXXThisDecl - When parsing an C++ function, this will hold the implicit - /// 'this' declaration. + /// CXXThisDecl - When generating code for a C++ member function, + /// this will hold the implicit 'this' declaration. ImplicitParamDecl *CXXThisDecl; + /// CXXVTTDecl - When generating code for a base object constructor or + /// base object destructor with virtual bases, this will hold the implicit + /// VTT parameter. + ImplicitParamDecl *CXXVTTDecl; + /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary. struct CXXLiveTemporaryInfo { /// Temporary - The live temporary. @@ -284,9 +337,9 @@ private: /// DtorBlock - The destructor block. llvm::BasicBlock *DtorBlock; - /// CondPtr - If this is a conditional temporary, this is the pointer to - /// the condition variable that states whether the destructor should be - /// called or not. + /// CondPtr - If this is a conditional temporary, this is the pointer to the + /// condition variable that states whether the destructor should be called + /// or not. llvm::Value *CondPtr; CXXLiveTemporaryInfo(const CXXTemporary *temporary, @@ -298,10 +351,10 @@ private: llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries; - /// ConditionalTempDestructionStack - Contains the number of live temporaries - /// when PushConditionalTempDestruction was called. This is used so that - /// we know how many temporaries were created by a certain expression. 
- llvm::SmallVector<size_t, 4> ConditionalTempDestructionStack; + /// ConditionalBranchLevel - Contains the nesting level of the current + /// conditional branch. This is used so that we know if a temporary should be + /// destroyed conditionally. + unsigned ConditionalBranchLevel; /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM @@ -384,15 +437,17 @@ public: /// DynamicTypeAdjust - Do the non-virtual and virtual adjustments on an /// object pointer to alter the dynamic type of the pointer. Used by /// GenerateCovariantThunk for building thunks. - llvm::Value *DynamicTypeAdjust(llvm::Value *V, int64_t nv, int64_t v); + llvm::Value *DynamicTypeAdjust(llvm::Value *V, + const ThunkAdjustment &Adjustment); /// GenerateThunk - Generate a thunk for the given method llvm::Constant *GenerateThunk(llvm::Function *Fn, const CXXMethodDecl *MD, - bool Extern, int64_t nv, int64_t v); - llvm::Constant *GenerateCovariantThunk(llvm::Function *Fn, - const CXXMethodDecl *MD, bool Extern, - int64_t nv_t, int64_t v_t, - int64_t nv_r, int64_t v_r); + bool Extern, + const ThunkAdjustment &ThisAdjustment); + llvm::Constant * + GenerateCovariantThunk(llvm::Function *Fn, const CXXMethodDecl *MD, + bool Extern, + const CovariantThunkAdjustment &Adjustment); void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type); @@ -416,8 +471,8 @@ public: const FunctionArgList &Args); /// EmitDtorEpilogue - Emit all code that comes at the end of class's - /// destructor. This is to call destructors on members and base classes - /// in reverse order of their construction. + /// destructor. This is to call destructors on members and base classes in + /// reverse order of their construction. void EmitDtorEpilogue(const CXXDestructorDecl *Dtor, CXXDtorType Type); @@ -461,9 +516,9 @@ public: /// label maps to. llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S); - /// SimplifyForwardingBlocks - If the given basic block is only a - /// branch to another basic block, simplify it. This assumes that no - /// other code could potentially reference the basic block. + /// SimplifyForwardingBlocks - If the given basic block is only a branch to + /// another basic block, simplify it. This assumes that no other code could + /// potentially reference the basic block. void SimplifyForwardingBlocks(llvm::BasicBlock *BB); /// EmitBlock - Emit the given block \arg BB and set it as the insert point, @@ -579,9 +634,9 @@ public: // instruction in LLVM instead once it works well enough. llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty); - // EmitVLASize - Generate code for any VLA size expressions that might occur - // in a variably modified type. If Ty is a VLA, will return the value that - // corresponds to the size in bytes of the VLA type. Will return 0 otherwise. + /// EmitVLASize - Generate code for any VLA size expressions that might occur + /// in a variably modified type. If Ty is a VLA, will return the value that + /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise. /// /// This function can be called with a null (unreachable) insert point. llvm::Value *EmitVLASize(QualType Ty); @@ -594,15 +649,20 @@ public: /// generating code for an C++ member function. llvm::Value *LoadCXXThis(); - /// GetAddressCXXOfBaseClass - This function will add the necessary delta - /// to the load of 'this' and returns address of the base class. 
+ /// GetAddressOfBaseClass - This function will add the necessary delta to the + /// load of 'this' and returns address of the base class. // FIXME. This currently only does a derived to non-virtual base conversion. // Other kinds of conversions will come later. - llvm::Value *GetAddressCXXOfBaseClass(llvm::Value *BaseValue, + llvm::Value *GetAddressOfBaseClass(llvm::Value *Value, + const CXXRecordDecl *ClassDecl, + const CXXRecordDecl *BaseClassDecl, + bool NullCheckValue); + + llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value, const CXXRecordDecl *ClassDecl, - const CXXRecordDecl *BaseClassDecl, + const CXXRecordDecl *DerivedClassDecl, bool NullCheckValue); - + llvm::Value * GetVirtualCXXBaseClassOffset(llvm::Value *This, const CXXRecordDecl *ClassDecl, @@ -637,10 +697,15 @@ public: void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, const ConstantArrayType *ArrayTy, - llvm::Value *ArrayPtr); + llvm::Value *ArrayPtr, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd); + void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, llvm::Value *NumElements, - llvm::Value *ArrayPtr); + llvm::Value *ArrayPtr, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd); void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D, const ArrayType *Array, @@ -858,7 +923,6 @@ public: LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E); - LValue EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E); LValue EmitCXXConstructLValue(const CXXConstructExpr *E); LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E); LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E); @@ -880,9 +944,8 @@ public: /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. /// - /// \param TargetDecl - If given, the decl of the function in a - /// direct call; used to set attributes on the call (noreturn, - /// etc.). + /// \param TargetDecl - If given, the decl of the function in a direct call; + /// used to set attributes on the call (noreturn, etc.). RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee, const CallArgList &Args, @@ -994,15 +1057,14 @@ public: /// LoadComplexFromAddr - Load a complex number from the specified address. ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile); - /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global - /// for a static block var decl. + /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global for a + /// static block var decl. llvm::GlobalVariable * CreateStaticBlockVarDecl(const VarDecl &D, const char *Separator, - llvm::GlobalValue::LinkageTypes - Linkage); + llvm::GlobalValue::LinkageTypes Linkage); - /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ - /// runtime initialized static block var decl. + /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ runtime + /// initialized static block var decl. 
void EmitStaticCXXBlockVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV); diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp index 195acc5..4b3b122 100644 --- a/lib/CodeGen/CodeGenModule.cpp +++ b/lib/CodeGen/CodeGenModule.cpp @@ -21,6 +21,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/RecordLayout.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/SourceManager.h" @@ -160,19 +161,13 @@ const char *CodeGenModule::getMangledName(const GlobalDecl &GD) { /// the unmangled name. /// const char *CodeGenModule::getMangledName(const NamedDecl *ND) { - // In C, functions with no attributes never need to be mangled. Fastpath them. - if (!getLangOptions().CPlusPlus && !ND->hasAttrs()) { + if (!getMangleContext().shouldMangleDeclName(ND)) { assert(ND->getIdentifier() && "Attempt to mangle unnamed decl."); return ND->getNameAsCString(); } llvm::SmallString<256> Name; - llvm::raw_svector_ostream Out(Name); - if (!mangleName(getMangleContext(), ND, Out)) { - assert(ND->getIdentifier() && "Attempt to mangle unnamed decl."); - return ND->getNameAsCString(); - } - + getMangleContext().mangleName(ND, Name); Name += '\0'; return UniqueMangledName(Name.begin(), Name.end()); } @@ -353,8 +348,12 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D, else if (Features.getStackProtectorMode() == LangOptions::SSPReq) F->addFnAttr(llvm::Attribute::StackProtectReq); - if (const AlignedAttr *AA = D->getAttr<AlignedAttr>()) - F->setAlignment(AA->getAlignment()/8); + if (const AlignedAttr *AA = D->getAttr<AlignedAttr>()) { + unsigned width = Context.Target.getCharWidth(); + F->setAlignment(AA->getAlignment() / width); + while ((AA = AA->getNext<AlignedAttr>())) + F->setAlignment(std::max(F->getAlignment(), AA->getAlignment() / width)); + } // C++ ABI requires 2-byte alignment for member functions. if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D)) F->setAlignment(2); @@ -551,7 +550,7 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) { // cannot be. if (VD->isInAnonymousNamespace()) return true; - if (VD->getStorageClass() == VarDecl::Static) { + if (VD->getLinkage() == VarDecl::InternalLinkage) { // Initializer has side effects? if (VD->getInit() && VD->getInit()->HasSideEffects(Context)) return false; @@ -616,16 +615,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) { Context.getSourceManager(), "Generating code for declaration"); - if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) { - const CXXRecordDecl *RD = MD->getParent(); - // We have to convert it to have a record layout. - Types.ConvertTagDeclType(RD); - const CGRecordLayout &CGLayout = Types.getCGRecordLayout(RD); - // A definition of a KeyFunction, generates all the class data, such - // as vtable, rtti and the VTT. - if (CGLayout.getKeyFunction() == MD) - getVtableInfo().GenerateClassData(RD); - } + if (isa<CXXMethodDecl>(D)) + getVtableInfo().MaybeEmitVtable(GD); + if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D)) EmitCXXConstructor(CD, GD.getCtorType()); else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) @@ -697,143 +689,20 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName, // A called constructor which has no definition or declaration need be // synthesized. 
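Illustrative note (not part of the diff): the AlignedAttr loop added to SetLLVMFunctionAttributesForDefinition above walks the whole attribute chain and keeps the largest requested alignment instead of consulting only a single attribute. Assuming the usual GCC-style spelling:

  __attribute__((aligned(4)))
  __attribute__((aligned(16)))
  void hot_entry() {}     // the emitted function is given 16-byte alignment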
else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) { - const CXXRecordDecl *ClassDecl = - cast<CXXRecordDecl>(CD->getDeclContext()); - if (CD->isCopyConstructor(getContext())) - DeferredCopyConstructorToEmit(D); - else if (!ClassDecl->hasUserDeclaredConstructor()) + if (CD->isImplicit()) + DeferredDeclsToEmit.push_back(D); + } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) { + if (DD->isImplicit()) + DeferredDeclsToEmit.push_back(D); + } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { + if (MD->isCopyAssignment() && MD->isImplicit()) DeferredDeclsToEmit.push_back(D); } - else if (isa<CXXDestructorDecl>(FD)) - DeferredDestructorToEmit(D); - else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) - if (MD->isCopyAssignment()) - DeferredCopyAssignmentToEmit(D); } return F; } -/// Defer definition of copy constructor(s) which need be implicitly defined. -void CodeGenModule::DeferredCopyConstructorToEmit(GlobalDecl CopyCtorDecl) { - const CXXConstructorDecl *CD = - cast<CXXConstructorDecl>(CopyCtorDecl.getDecl()); - const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext()); - if (ClassDecl->hasTrivialCopyConstructor() || - ClassDecl->hasUserDeclaredCopyConstructor()) - return; - - // First make sure all direct base classes and virtual bases and non-static - // data mebers which need to have their copy constructors implicitly defined - // are defined. 12.8.p7 - for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin(); - Base != ClassDecl->bases_end(); ++Base) { - CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); - if (CXXConstructorDecl *BaseCopyCtor = - BaseClassDecl->getCopyConstructor(Context, 0)) - GetAddrOfCXXConstructor(BaseCopyCtor, Ctor_Complete); - } - - for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), - FieldEnd = ClassDecl->field_end(); - Field != FieldEnd; ++Field) { - QualType FieldType = Context.getCanonicalType((*Field)->getType()); - if (const ArrayType *Array = Context.getAsArrayType(FieldType)) - FieldType = Array->getElementType(); - if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { - if ((*Field)->isAnonymousStructOrUnion()) - continue; - CXXRecordDecl *FieldClassDecl - = cast<CXXRecordDecl>(FieldClassType->getDecl()); - if (CXXConstructorDecl *FieldCopyCtor = - FieldClassDecl->getCopyConstructor(Context, 0)) - GetAddrOfCXXConstructor(FieldCopyCtor, Ctor_Complete); - } - } - DeferredDeclsToEmit.push_back(CopyCtorDecl); -} - -/// Defer definition of copy assignments which need be implicitly defined. -void CodeGenModule::DeferredCopyAssignmentToEmit(GlobalDecl CopyAssignDecl) { - const CXXMethodDecl *CD = cast<CXXMethodDecl>(CopyAssignDecl.getDecl()); - const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext()); - - if (ClassDecl->hasTrivialCopyAssignment() || - ClassDecl->hasUserDeclaredCopyAssignment()) - return; - - // First make sure all direct base classes and virtual bases and non-static - // data mebers which need to have their copy assignments implicitly defined - // are defined. 
12.8.p12 - for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin(); - Base != ClassDecl->bases_end(); ++Base) { - CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); - const CXXMethodDecl *MD = 0; - if (!BaseClassDecl->hasTrivialCopyAssignment() && - !BaseClassDecl->hasUserDeclaredCopyAssignment() && - BaseClassDecl->hasConstCopyAssignment(getContext(), MD)) - GetAddrOfFunction(MD, 0); - } - - for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), - FieldEnd = ClassDecl->field_end(); - Field != FieldEnd; ++Field) { - QualType FieldType = Context.getCanonicalType((*Field)->getType()); - if (const ArrayType *Array = Context.getAsArrayType(FieldType)) - FieldType = Array->getElementType(); - if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { - if ((*Field)->isAnonymousStructOrUnion()) - continue; - CXXRecordDecl *FieldClassDecl - = cast<CXXRecordDecl>(FieldClassType->getDecl()); - const CXXMethodDecl *MD = 0; - if (!FieldClassDecl->hasTrivialCopyAssignment() && - !FieldClassDecl->hasUserDeclaredCopyAssignment() && - FieldClassDecl->hasConstCopyAssignment(getContext(), MD)) - GetAddrOfFunction(MD, 0); - } - } - DeferredDeclsToEmit.push_back(CopyAssignDecl); -} - -void CodeGenModule::DeferredDestructorToEmit(GlobalDecl DtorDecl) { - const CXXDestructorDecl *DD = cast<CXXDestructorDecl>(DtorDecl.getDecl()); - const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(DD->getDeclContext()); - if (ClassDecl->hasTrivialDestructor() || - ClassDecl->hasUserDeclaredDestructor()) - return; - - for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin(); - Base != ClassDecl->bases_end(); ++Base) { - CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); - if (const CXXDestructorDecl *BaseDtor = - BaseClassDecl->getDestructor(Context)) - GetAddrOfCXXDestructor(BaseDtor, Dtor_Complete); - } - - for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), - FieldEnd = ClassDecl->field_end(); - Field != FieldEnd; ++Field) { - QualType FieldType = Context.getCanonicalType((*Field)->getType()); - if (const ArrayType *Array = Context.getAsArrayType(FieldType)) - FieldType = Array->getElementType(); - if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { - if ((*Field)->isAnonymousStructOrUnion()) - continue; - CXXRecordDecl *FieldClassDecl - = cast<CXXRecordDecl>(FieldClassType->getDecl()); - if (const CXXDestructorDecl *FieldDtor = - FieldClassDecl->getDestructor(Context)) - GetAddrOfCXXDestructor(FieldDtor, Dtor_Complete); - } - } - DeferredDeclsToEmit.push_back(DtorDecl); -} - - /// GetAddrOfFunction - Return the address of the given function. If Ty is /// non-null, then this function will use the specified type if it has to /// create it (this occurs when we see a definition of the function). @@ -982,9 +851,8 @@ GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) { return CodeGenModule::GVA_TemplateInstantiation; } } - - // Static variables get internal linkage. 
- if (VD->getStorageClass() == VarDecl::Static) + + if (VD->getLinkage() == VarDecl::InternalLinkage) return CodeGenModule::GVA_Internal; return CodeGenModule::GVA_StrongExternal; @@ -1097,7 +965,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) { GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); } else if (Linkage == GVA_TemplateInstantiation) GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage); - else if (!CodeGenOpts.NoCommon && + else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon && !D->hasExternalStorage() && !D->getInit() && !D->getAttr<SectionAttr>()) { GV->setLinkage(llvm::GlobalVariable::CommonLinkage); @@ -1734,6 +1602,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) { case Decl::NamespaceAlias: break; case Decl::CXXConstructor: + // Skip function templates + if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate()) + return; + EmitCXXConstructors(cast<CXXConstructorDecl>(D)); break; case Decl::CXXDestructor: diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h index c8562d6..78bc4ed 100644 --- a/lib/CodeGen/CodeGenModule.h +++ b/lib/CodeGen/CodeGenModule.h @@ -231,15 +231,16 @@ public: llvm::Constant *GenerateRttiRef(const CXXRecordDecl *RD); /// GenerateRttiNonClass - Generate the rtti information for the given /// non-class type. - llvm::Constant *GenerateRttiNonClass(QualType Ty); + llvm::Constant *GenerateRtti(QualType Ty); + + /// BuildThunk - Build a thunk for the given method. + llvm::Constant *BuildThunk(const CXXMethodDecl *MD, bool Extern, + const ThunkAdjustment &ThisAdjustment); - /// BuildThunk - Build a thunk for the given method - llvm::Constant *BuildThunk(const CXXMethodDecl *MD, bool Extern, int64_t nv, - int64_t v); /// BuildCoVariantThunk - Build a thunk for the given method - llvm::Constant *BuildCovariantThunk(const CXXMethodDecl *MD, bool Extern, - int64_t nv_t, int64_t v_t, - int64_t nv_r, int64_t v_r); + llvm::Constant * + BuildCovariantThunk(const CXXMethodDecl *MD, bool Extern, + const CovariantThunkAdjustment &Adjustment); typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t; typedef llvm::DenseMap<const CXXRecordDecl *, @@ -427,9 +428,6 @@ private: llvm::Constant *GetOrCreateLLVMGlobal(const char *MangledName, const llvm::PointerType *PTy, const VarDecl *D); - void DeferredCopyConstructorToEmit(GlobalDecl D); - void DeferredCopyAssignmentToEmit(GlobalDecl D); - void DeferredDestructorToEmit(GlobalDecl D); /// SetCommonAttributes - Set attributes which are common to any /// form of a global definition (alias, Objective-C method, diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp index 1f83f37..c89879f 100644 --- a/lib/CodeGen/CodeGenTypes.cpp +++ b/lib/CodeGen/CodeGenTypes.cpp @@ -38,7 +38,13 @@ CodeGenTypes::~CodeGenTypes() { I = CGRecordLayouts.begin(), E = CGRecordLayouts.end(); I != E; ++I) delete I->second; - CGRecordLayouts.clear(); + { + llvm::FoldingSet<CGFunctionInfo>::iterator + I = FunctionInfos.begin(), E = FunctionInfos.end(); + while (I != E) + delete &*I++; + } + delete TheABIInfo; } /// ConvertType - Convert the specified type to its LLVM form. @@ -197,6 +203,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { case BuiltinType::Void: case BuiltinType::ObjCId: case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: // LLVM void type can only be used as the result of a function call. Just // map to the same as char. 
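Illustrative note (not part of the diff): the !getLangOptions().CPlusPlus guard added to the common-linkage check in EmitGlobalVarDefinition above reflects that only C has tentative definitions. For a file-scope variable with neither an initializer nor extern:

  int counter;   // C: a tentative definition, eligible for common linkage
                 // C++: an ordinary definition; after this change it is
                 //      emitted as a normal external global, not a common symbol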
return llvm::IntegerType::get(getLLVMContext(), 8); diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h index f447549..2ff602f 100644 --- a/lib/CodeGen/CodeGenTypes.h +++ b/lib/CodeGen/CodeGenTypes.h @@ -20,6 +20,7 @@ #include <vector> #include "CGCall.h" +#include "CGCXX.h" namespace llvm { class FunctionType; @@ -34,6 +35,8 @@ namespace llvm { namespace clang { class ABIInfo; class ASTContext; + class CXXConstructorDecl; + class CXXDestructorDecl; class CXXMethodDecl; class FieldDecl; class FunctionProtoType; @@ -61,17 +64,9 @@ namespace CodeGen { /// is a member pointer, or a struct that contains a member pointer. bool ContainsMemberPointer; - /// KeyFunction - The key function of the record layout (if one exists), - /// which is the first non-pure virtual function that is not inline at the - /// point of class definition. - /// See http://www.codesourcery.com/public/cxx-abi/abi.html#vague-vtable. - const CXXMethodDecl *KeyFunction; - public: - CGRecordLayout(const llvm::Type *T, bool ContainsMemberPointer, - const CXXMethodDecl *KeyFunction) - : LLVMType(T), ContainsMemberPointer(ContainsMemberPointer), - KeyFunction(KeyFunction) { } + CGRecordLayout(const llvm::Type *T, bool ContainsMemberPointer) + : LLVMType(T), ContainsMemberPointer(ContainsMemberPointer) { } /// getLLVMType - Return llvm type associated with this record. const llvm::Type *getLLVMType() const { @@ -81,10 +76,6 @@ namespace CodeGen { bool containsMemberPointer() const { return ContainsMemberPointer; } - - const CXXMethodDecl *getKeyFunction() const { - return KeyFunction; - } }; /// CodeGenTypes - This class organizes the cross-module state that is used @@ -173,6 +164,12 @@ public: const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info, bool IsVariadic); + + /// GetFunctionTypeForVtable - Get the LLVM function type for use in a vtable, + /// given a CXXMethodDecl. If the method to has an incomplete return type, + /// and/or incomplete argument types, this will return the opaque type. + const llvm::Type *GetFunctionTypeForVtable(const CXXMethodDecl *MD); + const CGRecordLayout &getCGRecordLayout(const TagDecl*) const; /// getLLVMFieldNo - Return llvm::StructType element number @@ -192,7 +189,11 @@ public: const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD); const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD); const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD); - + const CGFunctionInfo &getFunctionInfo(const CXXConstructorDecl *D, + CXXCtorType Type); + const CGFunctionInfo &getFunctionInfo(const CXXDestructorDecl *D, + CXXDtorType Type); + // getFunctionInfo - Get the function info for a member function. 
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD, const FunctionProtoType *FTP); diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp index 0a7124d..d6f7808 100644 --- a/lib/CodeGen/Mangle.cpp +++ b/lib/CodeGen/Mangle.cpp @@ -23,100 +23,111 @@ #include "clang/AST/ExprCXX.h" #include "clang/Basic/SourceManager.h" #include "llvm/ADT/StringExtras.h" -#include "llvm/Support/Compiler.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/ErrorHandling.h" +#include "CGVtable.h" using namespace clang; +using namespace CodeGen; namespace { - class VISIBILITY_HIDDEN CXXNameMangler { - MangleContext &Context; - llvm::raw_ostream &Out; - - const CXXMethodDecl *Structor; - unsigned StructorType; - CXXCtorType CtorType; - - llvm::DenseMap<uintptr_t, unsigned> Substitutions; - - public: - CXXNameMangler(MangleContext &C, llvm::raw_ostream &os) - : Context(C), Out(os), Structor(0), StructorType(0) { } - - bool mangle(const NamedDecl *D); - void mangleCalloffset(int64_t nv, int64_t v); - void mangleThunk(const FunctionDecl *FD, int64_t nv, int64_t v); - void mangleCovariantThunk(const FunctionDecl *FD, - int64_t nv_t, int64_t v_t, - int64_t nv_r, int64_t v_r); - void mangleGuardVariable(const VarDecl *D); - - void mangleCXXVtable(const CXXRecordDecl *RD); - void mangleCXXVTT(const CXXRecordDecl *RD); - void mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset, - const CXXRecordDecl *Type); - void mangleCXXRtti(QualType Ty); - void mangleCXXRttiName(QualType Ty); - void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type); - void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type); - - private: - bool mangleSubstitution(const NamedDecl *ND); - bool mangleSubstitution(QualType T); - bool mangleSubstitution(uintptr_t Ptr); - - bool mangleStandardSubstitution(const NamedDecl *ND); - - void addSubstitution(const NamedDecl *ND) { - ND = cast<NamedDecl>(ND->getCanonicalDecl()); + +static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) { + assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) && + "Passed in decl is not a ctor or dtor!"); + + if (const TemplateDecl *TD = MD->getPrimaryTemplate()) { + MD = cast<CXXMethodDecl>(TD->getTemplatedDecl()); - addSubstitution(reinterpret_cast<uintptr_t>(ND)); - } - void addSubstitution(QualType T); - void addSubstitution(uintptr_t Ptr); + assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) && + "Templated decl is not a ctor or dtor!"); + } - bool mangleFunctionDecl(const FunctionDecl *FD); - - void mangleFunctionEncoding(const FunctionDecl *FD); - void mangleName(const NamedDecl *ND); - void mangleName(const TemplateDecl *TD, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs); - void mangleUnqualifiedName(const NamedDecl *ND); - void mangleUnscopedName(const NamedDecl *ND); - void mangleUnscopedTemplateName(const TemplateDecl *ND); - void mangleSourceName(const IdentifierInfo *II); - void mangleLocalName(const NamedDecl *ND); - void mangleNestedName(const NamedDecl *ND); - void mangleNestedName(const TemplateDecl *TD, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs); - void manglePrefix(const DeclContext *DC); - void mangleTemplatePrefix(const TemplateDecl *ND); - void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity); - void mangleQualifiers(Qualifiers Quals); - void mangleType(QualType T); - - // Declare manglers for every type class. + return MD; +} + +/// CXXNameMangler - Manage the mangling of a single name. 
+class CXXNameMangler { + MangleContext &Context; + llvm::raw_svector_ostream Out; + + const CXXMethodDecl *Structor; + unsigned StructorType; + + llvm::DenseMap<uintptr_t, unsigned> Substitutions; + +public: + CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res) + : Context(C), Out(Res), Structor(0), StructorType(0) { } + CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res, + const CXXConstructorDecl *D, CXXCtorType Type) + : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { } + CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res, + const CXXDestructorDecl *D, CXXDtorType Type) + : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { } + + llvm::raw_svector_ostream &getStream() { return Out; } + + void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z"); + void mangleCallOffset(const ThunkAdjustment &Adjustment); + void mangleNumber(int64_t Number); + void mangleFunctionEncoding(const FunctionDecl *FD); + void mangleName(const NamedDecl *ND); + void mangleType(QualType T); + +private: + bool mangleSubstitution(const NamedDecl *ND); + bool mangleSubstitution(QualType T); + bool mangleSubstitution(uintptr_t Ptr); + + bool mangleStandardSubstitution(const NamedDecl *ND); + + void addSubstitution(const NamedDecl *ND) { + ND = cast<NamedDecl>(ND->getCanonicalDecl()); + + addSubstitution(reinterpret_cast<uintptr_t>(ND)); + } + void addSubstitution(QualType T); + void addSubstitution(uintptr_t Ptr); + + bool mangleFunctionDecl(const FunctionDecl *FD); + + void mangleName(const TemplateDecl *TD, + const TemplateArgument *TemplateArgs, + unsigned NumTemplateArgs); + void mangleUnqualifiedName(const NamedDecl *ND); + void mangleUnscopedName(const NamedDecl *ND); + void mangleUnscopedTemplateName(const TemplateDecl *ND); + void mangleSourceName(const IdentifierInfo *II); + void mangleLocalName(const NamedDecl *ND); + void mangleNestedName(const NamedDecl *ND); + void mangleNestedName(const TemplateDecl *TD, + const TemplateArgument *TemplateArgs, + unsigned NumTemplateArgs); + void manglePrefix(const DeclContext *DC); + void mangleTemplatePrefix(const TemplateDecl *ND); + void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity); + void mangleQualifiers(Qualifiers Quals); + + // Declare manglers for every type class. 
#define ABSTRACT_TYPE(CLASS, PARENT) #define NON_CANONICAL_TYPE(CLASS, PARENT) #define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T); #include "clang/AST/TypeNodes.def" - void mangleType(const TagType*); - void mangleBareFunctionType(const FunctionType *T, - bool MangleReturnType); - void mangleExpression(const Expr *E); - void mangleCXXCtorType(CXXCtorType T); - void mangleCXXDtorType(CXXDtorType T); - - void mangleTemplateArgs(const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs); - void mangleTemplateArgumentList(const TemplateArgumentList &L); - void mangleTemplateArgument(const TemplateArgument &A); - - void mangleTemplateParameter(unsigned Index); - }; + void mangleType(const TagType*); + void mangleBareFunctionType(const FunctionType *T, + bool MangleReturnType); + void mangleExpression(const Expr *E); + void mangleCXXCtorType(CXXCtorType T); + void mangleCXXDtorType(CXXDtorType T); + + void mangleTemplateArgs(const TemplateArgument *TemplateArgs, + unsigned NumTemplateArgs); + void mangleTemplateArgumentList(const TemplateArgumentList &L); + void mangleTemplateArgument(const TemplateArgument &A); + + void mangleTemplateParameter(unsigned Index); +}; } static bool isInCLinkageSpecification(const Decl *D) { @@ -130,132 +141,72 @@ static bool isInCLinkageSpecification(const Decl *D) { return false; } -bool CXXNameMangler::mangleFunctionDecl(const FunctionDecl *FD) { +bool MangleContext::shouldMangleDeclName(const NamedDecl *D) { + // In C, functions with no attributes never need to be mangled. Fastpath them. + if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs()) + return false; + + // Any decl can be declared with __asm("foo") on it, and this takes precedence + // over all other naming in the .o file. + if (D->hasAttr<AsmLabelAttr>()) + return true; + // Clang's "overloadable" attribute extension to C/C++ implies name mangling // (always) as does passing a C++ member function and a function // whose name is not a simple identifier. - if (!FD->hasAttr<OverloadableAttr>() && !isa<CXXMethodDecl>(FD) && - FD->getDeclName().isIdentifier()) { - // C functions are not mangled, and "main" is never mangled. - if (!Context.getASTContext().getLangOptions().CPlusPlus || FD->isMain()) - return false; + const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); + if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) || + !FD->getDeclName().isIdentifier())) + return true; - // No mangling in an "implicit extern C" header. - if (FD->getLocation().isValid() && - Context.getASTContext().getSourceManager(). - isInExternCSystemHeader(FD->getLocation())) - return false; + // Otherwise, no mangling is done outside C++ mode. + if (!getASTContext().getLangOptions().CPlusPlus) + return false; - // No name mangling in a C linkage specification. - if (isInCLinkageSpecification(FD)) - return false; - } + // No mangling in an "implicit extern C" header. + if (D->getLocation().isValid() && + getASTContext().getSourceManager(). + isInExternCSystemHeader(D->getLocation())) + return false; + + // C functions, "main", and variables at global scope are not + // mangled. + if ((FD && FD->isMain()) || + (!FD && D->getDeclContext()->isTranslationUnit()) || + isInCLinkageSpecification(D)) + return false; - // If we get here, mangle the decl name! 
- Out << "_Z"; - mangleFunctionEncoding(FD); return true; } -bool CXXNameMangler::mangle(const NamedDecl *D) { +void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) { // Any decl can be declared with __asm("foo") on it, and this takes precedence // over all other naming in the .o file. if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) { // If we have an asm name, then we use it as the mangling. Out << '\01'; // LLVM IR Marker for __asm("foo") Out << ALA->getLabel(); - return true; + return; } // <mangled-name> ::= _Z <encoding> // ::= <data name> // ::= <special-name> - - // FIXME: Actually use a visitor to decode these? + Out << Prefix; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) - return mangleFunctionDecl(FD); - - if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { - if (!Context.getASTContext().getLangOptions().CPlusPlus || - isInCLinkageSpecification(D) || - D->getDeclContext()->isTranslationUnit()) - return false; - - Out << "_Z"; - mangleName(VD); - return true; - } - - return false; -} - -void CXXNameMangler::mangleCXXCtor(const CXXConstructorDecl *D, - CXXCtorType Type) { - assert(!Structor && "Structor already set!"); - Structor = D; - StructorType = Type; - - mangle(D); -} - -void CXXNameMangler::mangleCXXDtor(const CXXDestructorDecl *D, - CXXDtorType Type) { - assert(!Structor && "Structor already set!"); - Structor = D; - StructorType = Type; - - mangle(D); -} - -void CXXNameMangler::mangleCXXVtable(const CXXRecordDecl *RD) { - // <special-name> ::= TV <type> # virtual table - Out << "_ZTV"; - mangleName(RD); -} - -void CXXNameMangler::mangleCXXVTT(const CXXRecordDecl *RD) { - // <special-name> ::= TT <type> # VTT structure - Out << "_ZTT"; - mangleName(RD); -} - -void CXXNameMangler::mangleCXXCtorVtable(const CXXRecordDecl *RD, - int64_t Offset, - const CXXRecordDecl *Type) { - // <special-name> ::= TC <type> <offset number> _ <base type> - Out << "_ZTC"; - mangleName(RD); - Out << Offset; - Out << "_"; - mangleName(Type); -} - -void CXXNameMangler::mangleCXXRtti(QualType Ty) { - // <special-name> ::= TI <type> # typeinfo structure - Out << "_ZTI"; - - mangleType(Ty); -} - -void CXXNameMangler::mangleCXXRttiName(QualType Ty) { - // <special-name> ::= TS <type> # typeinfo name (null terminated byte string) - Out << "_ZTS"; - - mangleType(Ty); -} - -void CXXNameMangler::mangleGuardVariable(const VarDecl *D) { - // <special-name> ::= GV <object name> # Guard variable for one-time - // # initialization - - Out << "_ZGV"; - mangleName(D); + mangleFunctionEncoding(FD); + else + mangleName(cast<VarDecl>(D)); } void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { // <encoding> ::= <function name> <bare-function-type> mangleName(FD); + // Don't mangle in the type if this isn't a decl we should typically mangle. + if (!Context.shouldMangleDeclName(FD)) + return; + // Whether the mangling of a function type includes the return type depends on // the context and the nature of the function. The rules for deciding whether // the return type is included are: @@ -277,7 +228,7 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) || isa<CXXConversionDecl>(FD))) MangleReturnType = true; - + // Mangle the type of the primary template. 
FD = PrimaryTemplate->getTemplatedDecl(); } @@ -290,15 +241,16 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { mangleBareFunctionType(FT, MangleReturnType); } -static bool isStdNamespace(const DeclContext *DC) { - if (!DC->isNamespace() || !DC->getParent()->isTranslationUnit()) - return false; - - const NamespaceDecl *NS = cast<NamespaceDecl>(DC); +static bool isStdNamespace(const NamespaceDecl *NS) { const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier(); return II && II->isStr("std"); } +static bool isStdNamespace(const DeclContext *DC) { + return DC->isNamespace() && DC->getParent()->isTranslationUnit() && + isStdNamespace(cast<NamespaceDecl>(DC)); +} + static const TemplateDecl * isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) { // Check if we have a function template. @@ -315,7 +267,7 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) { TemplateArgs = &Spec->getTemplateArgs(); return Spec->getSpecializedTemplate(); } - + return 0; } @@ -328,7 +280,7 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) { const DeclContext *DC = ND->getDeclContext(); while (isa<LinkageSpecDecl>(DC)) DC = DC->getParent(); - + if (DC->isTranslationUnit() || isStdNamespace(DC)) { // Check if we have a template. const TemplateArgumentList *TemplateArgs = 0; @@ -341,24 +293,24 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) { mangleUnscopedName(ND); return; } - + if (isa<FunctionDecl>(DC)) { mangleLocalName(ND); return; } - + mangleNestedName(ND); } -void CXXNameMangler::mangleName(const TemplateDecl *TD, +void CXXNameMangler::mangleName(const TemplateDecl *TD, const TemplateArgument *TemplateArgs, unsigned NumTemplateArgs) { const DeclContext *DC = TD->getDeclContext(); while (isa<LinkageSpecDecl>(DC)) { - assert(cast<LinkageSpecDecl>(DC)->getLanguage() == + assert(cast<LinkageSpecDecl>(DC)->getLanguage() == LinkageSpecDecl::lang_cxx && "Unexpected linkage decl!"); DC = DC->getParent(); } - + if (DC->isTranslationUnit() || isStdNamespace(DC)) { mangleUnscopedTemplateName(TD); mangleTemplateArgs(TemplateArgs, NumTemplateArgs); @@ -372,7 +324,7 @@ void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) { // ::= St <unqualified-name> # ::std:: if (isStdNamespace(ND->getDeclContext())) Out << "St"; - + mangleUnqualifiedName(ND); } @@ -381,61 +333,39 @@ void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) { // ::= <substitution> if (mangleSubstitution(ND)) return; - + mangleUnscopedName(ND->getTemplatedDecl()); addSubstitution(ND); } -void CXXNameMangler::mangleCalloffset(int64_t nv, int64_t v) { +void CXXNameMangler::mangleNumber(int64_t Number) { + // <number> ::= [n] <non-negative decimal integer> + if (Number < 0) { + Out << 'n'; + Number = -Number; + } + + Out << Number; +} + +void CXXNameMangler::mangleCallOffset(const ThunkAdjustment &Adjustment) { // <call-offset> ::= h <nv-offset> _ // ::= v <v-offset> _ // <nv-offset> ::= <offset number> # non-virtual base override - // <v-offset> ::= <offset nubmer> _ <virtual offset number> + // <v-offset> ::= <offset number> _ <virtual offset number> // # virtual base override, with vcall offset - if (v == 0) { - Out << "h"; - if (nv < 0) { - Out << "n"; - nv = -nv; - } - Out << nv; - } else { - Out << "v"; - if (nv < 0) { - Out << "n"; - nv = -nv; - } - Out << nv; - Out << "_"; - if (v < 0) { - Out << "n"; - v = -v; - } - Out << v; + if (!Adjustment.Virtual) { + Out << 'h'; + mangleNumber(Adjustment.NonVirtual); + Out << '_'; + return; } 
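// A worked example of the <call-offset> grammar above (the class and method
// names are illustrative, not taken from this change): a purely non-virtual
// 'this' adjustment of -16 bytes mangles as "hn16_", giving thunk symbols of
// the form _ZThn16_N7Derived1fEv, while an adjustment routed through the
// vtable with a vcall offset of -24 and no fixed part mangles as "v0_n24_",
// giving _ZTv0_n24_N7Derived1fEv.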
- Out << "_"; -} - -void CXXNameMangler::mangleThunk(const FunctionDecl *FD, int64_t nv, - int64_t v) { - // <special-name> ::= T <call-offset> <base encoding> - // # base is the nominal target function of thunk - Out << "_ZT"; - mangleCalloffset(nv, v); - mangleFunctionEncoding(FD); -} - - void CXXNameMangler::mangleCovariantThunk(const FunctionDecl *FD, - int64_t nv_t, int64_t v_t, - int64_t nv_r, int64_t v_r) { - // <special-name> ::= Tc <call-offset> <call-offset> <base encoding> - // # base is the nominal target function of thunk - // # first call-offset is 'this' adjustment - // # second call-offset is result adjustment - Out << "_ZTc"; - mangleCalloffset(nv_t, v_t); - mangleCalloffset(nv_r, v_r); - mangleFunctionEncoding(FD); + + Out << 'v'; + mangleNumber(Adjustment.NonVirtual); + Out << '_'; + mangleNumber(Adjustment.Virtual); + Out << '_'; } void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) { @@ -453,13 +383,13 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) { Out << "12_GLOBAL__N_1"; break; } - } + } if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) { mangleSourceName(II); break; } - + // We must have an anonymous struct. const TagDecl *TD = cast<TagDecl>(ND); if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) { @@ -470,12 +400,12 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) { mangleSourceName(D->getDeclName().getAsIdentifierInfo()); break; } - + // Get a unique id for the anonymous struct. uint64_t AnonStructId = Context.getAnonymousStructId(TD); // Mangle it as a source name in the form - // [n] $_<id> + // [n] $_<id> // where n is the length of the string. llvm::SmallString<8> Str; Str += "$_"; @@ -525,6 +455,12 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) { cast<FunctionDecl>(ND)->getNumParams()); break; + case DeclarationName::CXXLiteralOperatorName: + // Guessing based on existing ABI. + Out << "ul"; + mangleSourceName(Name.getCXXLiteralIdentifier()); + break; + case DeclarationName::CXXUsingDirective: assert(false && "Can't mangle a using directive name!"); break; @@ -545,29 +481,29 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND) { Out << 'N'; if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers())); - + // Check if we have a template. const TemplateArgumentList *TemplateArgs = 0; - if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) { + if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) { mangleTemplatePrefix(TD); mangleTemplateArgumentList(*TemplateArgs); } else { manglePrefix(ND->getDeclContext()); mangleUnqualifiedName(ND); } - + Out << 'E'; } -void CXXNameMangler::mangleNestedName(const TemplateDecl *TD, +void CXXNameMangler::mangleNestedName(const TemplateDecl *TD, const TemplateArgument *TemplateArgs, unsigned NumTemplateArgs) { // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E Out << 'N'; - + mangleTemplatePrefix(TD); mangleTemplateArgs(TemplateArgs, NumTemplateArgs); - + Out << 'E'; } @@ -591,23 +527,23 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC) { while (isa<LinkageSpecDecl>(DC)) DC = DC->getParent(); - + if (DC->isTranslationUnit()) return; - + if (mangleSubstitution(cast<NamedDecl>(DC))) return; // Check if we have a template. 
const TemplateArgumentList *TemplateArgs = 0; - if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) { + if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) { mangleTemplatePrefix(TD); mangleTemplateArgumentList(*TemplateArgs); } else { manglePrefix(DC->getParent()); mangleUnqualifiedName(cast<NamedDecl>(DC)); } - + addSubstitution(cast<NamedDecl>(DC)); } @@ -618,12 +554,12 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) { if (mangleSubstitution(ND)) return; - + // FIXME: <template-param> - + manglePrefix(ND->getDeclContext()); mangleUnqualifiedName(ND->getTemplatedDecl()); - + addSubstitution(ND); } @@ -838,6 +774,7 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { break; case BuiltinType::ObjCId: Out << "11objc_object"; break; case BuiltinType::ObjCClass: Out << "10objc_class"; break; + case BuiltinType::ObjCSel: Out << "13objc_selector"; break; } } @@ -992,7 +929,7 @@ void CXXNameMangler::mangleType(const FixedWidthIntType *T) { void CXXNameMangler::mangleType(const TemplateSpecializationType *T) { TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl(); assert(TD && "FIXME: Support dependent template names!"); - + mangleName(TD, T->getArgs(), T->getNumArgs()); } @@ -1001,7 +938,7 @@ void CXXNameMangler::mangleType(const TypenameType *T) { Out << 'N'; const Type *QTy = T->getQualifier()->getAsType(); - if (const TemplateSpecializationType *TST = + if (const TemplateSpecializationType *TST = dyn_cast<TemplateSpecializationType>(QTy)) { if (!mangleSubstitution(QualType(TST, 0))) { TemplateDecl *TD = TST->getTemplateName().getAsTemplateDecl(); @@ -1010,7 +947,7 @@ void CXXNameMangler::mangleType(const TypenameType *T) { mangleTemplateArgs(TST->getArgs(), TST->getNumArgs()); addSubstitution(QualType(TST, 0)); } - } else if (const TemplateTypeParmType *TTPT = + } else if (const TemplateTypeParmType *TTPT = dyn_cast<TemplateTypeParmType>(QTy)) { // We use the QualType mangle type variant here because it handles // substitutions. 
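// Worked examples of the grammar these manglers implement (the declarations
// are illustrative): a <source-name> is <length><identifier>, so
// "objc_selector" above is emitted as "13objc_selector" and an identifier
// "f" as "1f".
//
//   void N::f(int)                  -->  _ZN1N1fEi     (nested-name, N ... E)
//   template<class T> void g(T);
//   a use of g<int>                 -->  _Z1gIiEvT_    (template-args, I ... E)
//
// Repeated prefixes are compressed via substitutions: the first candidate is
// written "S_", later ones "S0_", "S1_", ... with the sequence id in base 36.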
@@ -1019,7 +956,7 @@ void CXXNameMangler::mangleType(const TypenameType *T) { assert(false && "Unhandled type!"); mangleSourceName(T->getIdentifier()); - + Out << 'E'; } @@ -1047,7 +984,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) { case Expr::DeclRefExprClass: { const Decl *D = cast<DeclRefExpr>(E)->getDecl(); - + switch (D->getKind()) { default: assert(false && "Unhandled decl kind!"); case Decl::NonTypeTemplateParm: { @@ -1057,23 +994,23 @@ void CXXNameMangler::mangleExpression(const Expr *E) { } } - + break; } - - case Expr::UnresolvedDeclRefExprClass: { - const UnresolvedDeclRefExpr *DRE = cast<UnresolvedDeclRefExpr>(E); + + case Expr::DependentScopeDeclRefExprClass: { + const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E); const Type *QTy = DRE->getQualifier()->getAsType(); assert(QTy && "Qualifier was not type!"); // ::= sr <type> <unqualified-name> # dependent name Out << "sr"; mangleType(QualType(QTy, 0)); - + assert(DRE->getDeclName().getNameKind() == DeclarationName::Identifier && "Unhandled decl name kind!"); mangleSourceName(DRE->getDeclName().getAsIdentifierInfo()); - + break; } @@ -1122,13 +1059,8 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) { void CXXNameMangler::mangleTemplateArgumentList(const TemplateArgumentList &L) { // <template-args> ::= I <template-arg>+ E Out << "I"; - - for (unsigned i = 0, e = L.size(); i != e; ++i) { - const TemplateArgument &A = L[i]; - - mangleTemplateArgument(A); - } - + for (unsigned i = 0, e = L.size(); i != e; ++i) + mangleTemplateArgument(L[i]); Out << "E"; } @@ -1136,11 +1068,8 @@ void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs, unsigned NumTemplateArgs) { // <template-args> ::= I <template-arg>+ E Out << "I"; - - for (unsigned i = 0; i != NumTemplateArgs; ++i) { + for (unsigned i = 0; i != NumTemplateArgs; ++i) mangleTemplateArgument(TemplateArgs[i]); - } - Out << "E"; } @@ -1161,14 +1090,12 @@ void CXXNameMangler::mangleTemplateArgument(const TemplateArgument &A) { mangleExpression(A.getAsExpr()); Out << 'E'; break; - case TemplateArgument::Integral: + case TemplateArgument::Integral: { // <expr-primary> ::= L <type> <value number> E # integer literal + const llvm::APSInt *Integral = A.getAsIntegral(); Out << 'L'; - mangleType(A.getIntegralType()); - - const llvm::APSInt *Integral = A.getAsIntegral(); if (A.getIntegralType()->isBooleanType()) { // Boolean values are encoded as 0/1. Out << (Integral->getBoolValue() ? '1' : '0'); @@ -1177,10 +1104,27 @@ void CXXNameMangler::mangleTemplateArgument(const TemplateArgument &A) { Out << 'n'; Integral->abs().print(Out, false); } + Out << 'E'; + break; + } + case TemplateArgument::Declaration: { + // <expr-primary> ::= L <mangled-name> E # external name + // FIXME: Clang produces AST's where pointer-to-member-function expressions + // and pointer-to-function expressions are represented as a declaration not + // an expression; this is not how gcc represents them and this changes the + // mangling. + Out << 'L'; + // References to external entities use the mangled name; if the name would + // not normally be manged then mangle it as unqualified. + // + // FIXME: The ABI specifies that external names here should have _Z, but + // gcc leaves this off. + mangle(cast<NamedDecl>(A.getAsDecl()), "Z"); Out << 'E'; break; } + } } void CXXNameMangler::mangleTemplateParameter(unsigned Index) { @@ -1198,7 +1142,7 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) { // Try one of the standard substitutions first. 
if (mangleStandardSubstitution(ND)) return true; - + ND = cast<NamedDecl>(ND->getCanonicalDecl()); return mangleSubstitution(reinterpret_cast<uintptr_t>(ND)); } @@ -1208,79 +1152,79 @@ bool CXXNameMangler::mangleSubstitution(QualType T) { if (const RecordType *RT = T->getAs<RecordType>()) return mangleSubstitution(RT->getDecl()); } - + uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()); return mangleSubstitution(TypePtr); } bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) { - llvm::DenseMap<uintptr_t, unsigned>::iterator I = + llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr); if (I == Substitutions.end()) return false; - + unsigned SeqID = I->second; if (SeqID == 0) Out << "S_"; else { SeqID--; - + // <seq-id> is encoded in base-36, using digits and upper case letters. char Buffer[10]; char *BufferPtr = Buffer + 9; - + *BufferPtr = 0; if (SeqID == 0) *--BufferPtr = '0'; - + while (SeqID) { assert(BufferPtr > Buffer && "Buffer overflow!"); - + unsigned char c = static_cast<unsigned char>(SeqID) % 36; - + *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10); SeqID /= 36; } - + Out << 'S' << BufferPtr << '_'; } - + return true; } static bool isCharType(QualType T) { if (T.isNull()) return false; - + return T->isSpecificBuiltinType(BuiltinType::Char_S) || T->isSpecificBuiltinType(BuiltinType::Char_U); } -/// isCharSpecialization - Returns whether a given type is a template +/// isCharSpecialization - Returns whether a given type is a template /// specialization of a given name with a single argument of type char. static bool isCharSpecialization(QualType T, const char *Name) { if (T.isNull()) return false; - + const RecordType *RT = T->getAs<RecordType>(); if (!RT) return false; - - const ClassTemplateSpecializationDecl *SD = + + const ClassTemplateSpecializationDecl *SD = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl()); if (!SD) return false; if (!isStdNamespace(SD->getDeclContext())) return false; - + const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); if (TemplateArgs.size() != 1) return false; - + if (!isCharType(TemplateArgs[0].getAsType())) return false; - + return SD->getIdentifier()->getName() == Name; } @@ -1298,55 +1242,55 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) { if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) { if (!isStdNamespace(TD->getDeclContext())) return false; - + // <substitution> ::= Sa # ::std::allocator if (TD->getIdentifier()->isStr("allocator")) { Out << "Sa"; return true; } - + // <<substitution> ::= Sb # ::std::basic_string if (TD->getIdentifier()->isStr("basic_string")) { Out << "Sb"; return true; } } - - if (const ClassTemplateSpecializationDecl *SD = + + if (const ClassTemplateSpecializationDecl *SD = dyn_cast<ClassTemplateSpecializationDecl>(ND)) { // <substitution> ::= Ss # ::std::basic_string<char, // ::std::char_traits<char>, // ::std::allocator<char> > if (SD->getIdentifier()->isStr("basic_string")) { const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); - + if (TemplateArgs.size() != 3) return false; - + if (!isCharType(TemplateArgs[0].getAsType())) return false; - + if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits")) return false; - + if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator")) return false; Out << "Ss"; return true; } - - // <substitution> ::= So # ::std::basic_ostream<char, + + // <substitution> ::= So # ::std::basic_ostream<char, // ::std::char_traits<char> > if 
(SD->getIdentifier()->isStr("basic_ostream")) { const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); - + if (TemplateArgs.size() != 2) return false; - + if (!isCharType(TemplateArgs[0].getAsType())) return false; - + if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits")) return false; @@ -1364,138 +1308,144 @@ void CXXNameMangler::addSubstitution(QualType T) { return; } } - + uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()); addSubstitution(TypePtr); } void CXXNameMangler::addSubstitution(uintptr_t Ptr) { unsigned SeqID = Substitutions.size(); - - assert(!Substitutions.count(Ptr) && "Substitution already exists!"); - Substitutions[Ptr] = SeqID; -} - -namespace clang { - /// \brief Mangles the name of the declaration D and emits that name to the - /// given output stream. - /// - /// If the declaration D requires a mangled name, this routine will emit that - /// mangled name to \p os and return true. Otherwise, \p os will be unchanged - /// and this routine will return false. In this case, the caller should just - /// emit the identifier of the declaration (\c D->getIdentifier()) as its - /// name. - bool mangleName(MangleContext &Context, const NamedDecl *D, - llvm::raw_ostream &os) { - assert(!isa<CXXConstructorDecl>(D) && - "Use mangleCXXCtor for constructor decls!"); - assert(!isa<CXXDestructorDecl>(D) && - "Use mangleCXXDtor for destructor decls!"); - - PrettyStackTraceDecl CrashInfo(const_cast<NamedDecl *>(D), SourceLocation(), - Context.getASTContext().getSourceManager(), - "Mangling declaration"); - - CXXNameMangler Mangler(Context, os); - if (!Mangler.mangle(D)) - return false; - - os.flush(); - return true; - } - - /// \brief Mangles the a thunk with the offset n for the declaration D and - /// emits that name to the given output stream. - void mangleThunk(MangleContext &Context, const FunctionDecl *FD, - int64_t nv, int64_t v, llvm::raw_ostream &os) { - // FIXME: Hum, we might have to thunk these, fix. - assert(!isa<CXXDestructorDecl>(FD) && - "Use mangleCXXDtor for destructor decls!"); - - CXXNameMangler Mangler(Context, os); - Mangler.mangleThunk(FD, nv, v); - os.flush(); - } - - /// \brief Mangles the a covariant thunk for the declaration D and emits that - /// name to the given output stream. - void mangleCovariantThunk(MangleContext &Context, const FunctionDecl *FD, - int64_t nv_t, int64_t v_t, - int64_t nv_r, int64_t v_r, - llvm::raw_ostream &os) { - // FIXME: Hum, we might have to thunk these, fix. - assert(!isa<CXXDestructorDecl>(FD) && - "Use mangleCXXDtor for destructor decls!"); - - CXXNameMangler Mangler(Context, os); - Mangler.mangleCovariantThunk(FD, nv_t, v_t, nv_r, v_r); - os.flush(); - } - - /// mangleGuardVariable - Returns the mangled name for a guard variable - /// for the passed in VarDecl. 
- void mangleGuardVariable(MangleContext &Context, const VarDecl *D, - llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleGuardVariable(D); - - os.flush(); - } - - void mangleCXXCtor(MangleContext &Context, const CXXConstructorDecl *D, - CXXCtorType Type, llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXCtor(D, Type); - os.flush(); - } - - void mangleCXXDtor(MangleContext &Context, const CXXDestructorDecl *D, - CXXDtorType Type, llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXDtor(D, Type); + assert(!Substitutions.count(Ptr) && "Substitution already exists!"); + Substitutions[Ptr] = SeqID; +} - os.flush(); - } +// - void mangleCXXVtable(MangleContext &Context, const CXXRecordDecl *RD, - llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXVtable(RD); +/// \brief Mangles the name of the declaration D and emits that name to the +/// given output stream. +/// +/// If the declaration D requires a mangled name, this routine will emit that +/// mangled name to \p os and return true. Otherwise, \p os will be unchanged +/// and this routine will return false. In this case, the caller should just +/// emit the identifier of the declaration (\c D->getIdentifier()) as its +/// name. +void MangleContext::mangleName(const NamedDecl *D, + llvm::SmallVectorImpl<char> &Res) { + assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) && + "Invalid mangleName() call, argument is not a variable or function!"); + assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) && + "Invalid mangleName() call on 'structor decl!"); + + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + getASTContext().getSourceManager(), + "Mangling declaration"); + + CXXNameMangler Mangler(*this, Res); + return Mangler.mangle(D); +} + +void MangleContext::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type, + llvm::SmallVectorImpl<char> &Res) { + CXXNameMangler Mangler(*this, Res, D, Type); + Mangler.mangle(D); +} + +void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, + llvm::SmallVectorImpl<char> &Res) { + CXXNameMangler Mangler(*this, Res, D, Type); + Mangler.mangle(D); +} + +/// \brief Mangles the a thunk with the offset n for the declaration D and +/// emits that name to the given output stream. +void MangleContext::mangleThunk(const FunctionDecl *FD, + const ThunkAdjustment &ThisAdjustment, + llvm::SmallVectorImpl<char> &Res) { + // FIXME: Hum, we might have to thunk these, fix. + assert(!isa<CXXDestructorDecl>(FD) && + "Use mangleCXXDtor for destructor decls!"); - os.flush(); - } - - void mangleCXXVTT(MangleContext &Context, const CXXRecordDecl *RD, - llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXVTT(RD); + // <special-name> ::= T <call-offset> <base encoding> + // # base is the nominal target function of thunk + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZT"; + Mangler.mangleCallOffset(ThisAdjustment); + Mangler.mangleFunctionEncoding(FD); +} + +/// \brief Mangles the a covariant thunk for the declaration D and emits that +/// name to the given output stream. +void +MangleContext::mangleCovariantThunk(const FunctionDecl *FD, + const CovariantThunkAdjustment& Adjustment, + llvm::SmallVectorImpl<char> &Res) { + // FIXME: Hum, we might have to thunk these, fix. 
+ assert(!isa<CXXDestructorDecl>(FD) && + "Use mangleCXXDtor for destructor decls!"); - os.flush(); - } + // <special-name> ::= Tc <call-offset> <call-offset> <base encoding> + // # base is the nominal target function of thunk + // # first call-offset is 'this' adjustment + // # second call-offset is result adjustment + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZTc"; + Mangler.mangleCallOffset(Adjustment.ThisAdjustment); + Mangler.mangleCallOffset(Adjustment.ReturnAdjustment); + Mangler.mangleFunctionEncoding(FD); +} - void mangleCXXCtorVtable(MangleContext &Context, const CXXRecordDecl *RD, - int64_t Offset, const CXXRecordDecl *Type, - llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXCtorVtable(RD, Offset, Type); +/// mangleGuardVariable - Returns the mangled name for a guard variable +/// for the passed in VarDecl. +void MangleContext::mangleGuardVariable(const VarDecl *D, + llvm::SmallVectorImpl<char> &Res) { + // <special-name> ::= GV <object name> # Guard variable for one-time + // # initialization + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZGV"; + Mangler.mangleName(D); +} - os.flush(); - } +void MangleContext::mangleCXXVtable(const CXXRecordDecl *RD, + llvm::SmallVectorImpl<char> &Res) { + // <special-name> ::= TV <type> # virtual table + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZTV"; + Mangler.mangleName(RD); +} - void mangleCXXRtti(MangleContext &Context, QualType Ty, - llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXRtti(Ty); +void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD, + llvm::SmallVectorImpl<char> &Res) { + // <special-name> ::= TT <type> # VTT structure + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZTT"; + Mangler.mangleName(RD); +} - os.flush(); - } +void MangleContext::mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset, + const CXXRecordDecl *Type, + llvm::SmallVectorImpl<char> &Res) { + // <special-name> ::= TC <type> <offset number> _ <base type> + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZTC"; + Mangler.mangleName(RD); + Mangler.getStream() << Offset; + Mangler.getStream() << "_"; + Mangler.mangleName(Type); +} - void mangleCXXRttiName(MangleContext &Context, QualType Ty, - llvm::raw_ostream &os) { - CXXNameMangler Mangler(Context, os); - Mangler.mangleCXXRttiName(Ty); +void MangleContext::mangleCXXRtti(QualType Ty, + llvm::SmallVectorImpl<char> &Res) { + // <special-name> ::= TI <type> # typeinfo structure + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZTI"; + Mangler.mangleType(Ty); +} - os.flush(); - } +void MangleContext::mangleCXXRttiName(QualType Ty, + llvm::SmallVectorImpl<char> &Res) { + // <special-name> ::= TS <type> # typeinfo name (null terminated byte string) + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZTS"; + Mangler.mangleType(Ty); } diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h index 458708f..65b1d9f 100644 --- a/lib/CodeGen/Mangle.h +++ b/lib/CodeGen/Mangle.h @@ -23,7 +23,7 @@ #include "llvm/ADT/DenseMap.h" namespace llvm { - class raw_ostream; + template<typename T> class SmallVectorImpl; } namespace clang { @@ -34,50 +34,59 @@ namespace clang { class NamedDecl; class VarDecl; - class MangleContext { - ASTContext &Context; - - llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds; +namespace CodeGen { + class CovariantThunkAdjustment; + class ThunkAdjustment; + +/// MangleContext - Context for tracking state 
which persists across multiple +/// calls to the C++ name mangler. +class MangleContext { + ASTContext &Context; - public: - explicit MangleContext(ASTContext &Context) + llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds; + +public: + explicit MangleContext(ASTContext &Context) : Context(Context) { } - - ASTContext &getASTContext() const { return Context; } - - uint64_t getAnonymousStructId(const TagDecl *TD) { - std::pair<llvm::DenseMap<const TagDecl *, - uint64_t>::iterator, bool> Result = + + ASTContext &getASTContext() const { return Context; } + + uint64_t getAnonymousStructId(const TagDecl *TD) { + std::pair<llvm::DenseMap<const TagDecl *, + uint64_t>::iterator, bool> Result = AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size())); - return Result.first->second; - } - }; + return Result.first->second; + } + + /// @name Mangler Entry Points + /// @{ - bool mangleName(MangleContext &Context, const NamedDecl *D, - llvm::raw_ostream &os); - void mangleThunk(MangleContext &Context, const FunctionDecl *FD, - int64_t n, int64_t vn, llvm::raw_ostream &os); - void mangleCovariantThunk(MangleContext &Context, const FunctionDecl *FD, - int64_t nv_t, int64_t v_t, - int64_t nv_r, int64_t v_r, - llvm::raw_ostream &os); - void mangleGuardVariable(MangleContext &Context, const VarDecl *D, - llvm::raw_ostream &os); - void mangleCXXVtable(MangleContext &Context, const CXXRecordDecl *RD, - llvm::raw_ostream &os); - void mangleCXXVTT(MangleContext &Context, const CXXRecordDecl *RD, - llvm::raw_ostream &os); - void mangleCXXCtorVtable(MangleContext &Context, const CXXRecordDecl *RD, - int64_t Offset, const CXXRecordDecl *Type, - llvm::raw_ostream &os); - void mangleCXXRtti(MangleContext &Context, QualType T, - llvm::raw_ostream &os); - void mangleCXXRttiName(MangleContext &Context, QualType T, - llvm::raw_ostream &os); - void mangleCXXCtor(MangleContext &Context, const CXXConstructorDecl *D, - CXXCtorType Type, llvm::raw_ostream &os); - void mangleCXXDtor(MangleContext &Context, const CXXDestructorDecl *D, - CXXDtorType Type, llvm::raw_ostream &os); + bool shouldMangleDeclName(const NamedDecl *D); + + void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &); + void mangleThunk(const FunctionDecl *FD, + const ThunkAdjustment &ThisAdjustment, + llvm::SmallVectorImpl<char> &); + void mangleCovariantThunk(const FunctionDecl *FD, + const CovariantThunkAdjustment& Adjustment, + llvm::SmallVectorImpl<char> &); + void mangleGuardVariable(const VarDecl *D, llvm::SmallVectorImpl<char> &); + void mangleCXXVtable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &); + void mangleCXXVTT(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &); + void mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset, + const CXXRecordDecl *Type, + llvm::SmallVectorImpl<char> &); + void mangleCXXRtti(QualType T, llvm::SmallVectorImpl<char> &); + void mangleCXXRttiName(QualType T, llvm::SmallVectorImpl<char> &); + void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type, + llvm::SmallVectorImpl<char> &); + void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, + llvm::SmallVectorImpl<char> &); + + /// @} +}; + +} } #endif diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp index 1d8f31d..017059d 100644 --- a/lib/CodeGen/ModuleBuilder.cpp +++ b/lib/CodeGen/ModuleBuilder.cpp @@ -22,13 +22,11 @@ #include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/Target/TargetData.h" -#include "llvm/Support/Compiler.h" #include "llvm/ADT/OwningPtr.h" using 
namespace clang; - namespace { - class VISIBILITY_HIDDEN CodeGeneratorImpl : public CodeGenerator { + class CodeGeneratorImpl : public CodeGenerator { Diagnostic &Diags; llvm::OwningPtr<const llvm::TargetData> TD; ASTContext *Ctx; diff --git a/lib/CodeGen/TargetABIInfo.cpp b/lib/CodeGen/TargetABIInfo.cpp index ba0bc66..2bc6175 100644 --- a/lib/CodeGen/TargetABIInfo.cpp +++ b/lib/CodeGen/TargetABIInfo.cpp @@ -771,7 +771,7 @@ void X86_64ABIInfo::classify(QualType Ty, // reference. if (hasNonTrivialDestructorOrCopyConstructor(RT)) return; - + const RecordDecl *RD = RT->getDecl(); // Assume variable sized types are passed in memory. @@ -782,6 +782,32 @@ void X86_64ABIInfo::classify(QualType Ty, // Reset Lo class, this will be recomputed. Current = NoClass; + + // If this is a C++ record, classify the bases first. + if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), + e = CXXRD->bases_end(); i != e; ++i) { + assert(!i->isVirtual() && !i->getType()->isDependentType() && + "Unexpected base class!"); + const CXXRecordDecl *Base = + cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); + + // Classify this field. + // + // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a + // single eightbyte, each is classified separately. Each eightbyte gets + // initialized to class NO_CLASS. + Class FieldLo, FieldHi; + uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base); + classify(i->getType(), Context, Offset, FieldLo, FieldHi); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + } + + // Classify the fields one at a time, merging the results. unsigned idx = 0; for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); i != e; ++i, ++idx) { |
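A minimal sketch of the kind of aggregate the base-class walk above affects (the type and function names are illustrative; the register assignment is what the SysV AMD64 rules cited in the hunk lead one to expect):

    struct Base { double x; };
    struct Derived : Base { double y; };  // 16 bytes: Base subobject + one field

    // Each eightbyte is classified separately: Base::x fills the first
    // eightbyte and Derived::y the second, both class SSE, so a Derived value
    // should travel in two XMM registers rather than being passed in memory;
    // walking the bases is what lets the classifier see Base::x at all.
    Derived make(double a, double b) {
      Derived d;
      d.x = a;
      d.y = b;
      return d;
    }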