diff options
Diffstat (limited to 'lib/CodeGen')
36 files changed, 2164 insertions, 2765 deletions
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp index c10a401..5097341 100644 --- a/lib/CodeGen/CGBlocks.cpp +++ b/lib/CodeGen/CGBlocks.cpp @@ -17,6 +17,7 @@ #include "CodeGenModule.h" #include "clang/AST/DeclObjC.h" #include "llvm/Module.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/Target/TargetData.h" #include <algorithm> @@ -192,7 +193,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { CallArgList Args; CodeGenTypes &Types = CGM.getTypes(); const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args, - CC_Default, false); + FunctionType::ExtInfo()); if (CGM.ReturnTypeUsesSret(FnInfo)) flags |= BLOCK_USE_STRET; } @@ -472,8 +473,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E, QualType ResultType = FuncTy->getResultType(); const CGFunctionInfo &FnInfo = - CGM.getTypes().getFunctionInfo(ResultType, Args, FuncTy->getCallConv(), - FuncTy->getNoReturnAttr()); + CGM.getTypes().getFunctionInfo(ResultType, Args, + FuncTy->getExtInfo()); // Cast the function pointer to the right type. 
const llvm::Type *BlockFTy = @@ -678,8 +679,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr, const FunctionType *BlockFunctionType = BExpr->getFunctionType(); QualType ResultType; - CallingConv CC = BlockFunctionType->getCallConv(); - bool NoReturn = BlockFunctionType->getNoReturnAttr(); + FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType); bool IsVariadic; if (const FunctionProtoType *FTy = dyn_cast<FunctionProtoType>(BlockFunctionType)) { @@ -718,7 +718,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr, Args.push_back(std::make_pair(*i, (*i)->getType())); const CGFunctionInfo &FI = - CGM.getTypes().getFunctionInfo(ResultType, Args, CC, NoReturn); + CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo); CodeGenTypes &Types = CGM.getTypes(); const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic); @@ -843,7 +843,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T, Args.push_back(std::make_pair(Src, Src->getType())); const CGFunctionInfo &FI = - CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false); + CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo()); // FIXME: We'd like to put these into a mergable by content, with // internal linkage. @@ -924,7 +924,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose, Args.push_back(std::make_pair(Src, Src->getType())); const CGFunctionInfo &FI = - CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false); + CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo()); // FIXME: We'd like to put these into a mergable by content, with // internal linkage. 
@@ -1008,7 +1008,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) { Args.push_back(std::make_pair(Src, Src->getType())); const CGFunctionInfo &FI = - CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false); + CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo()); CodeGenTypes &Types = CGM.getTypes(); const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); @@ -1071,7 +1071,7 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T, Args.push_back(std::make_pair(Src, Src->getType())); const CGFunctionInfo &FI = - CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false); + CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo()); CodeGenTypes &Types = CGM.getTypes(); const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h index e91319f..efee0e3 100644 --- a/lib/CodeGen/CGBlocks.h +++ b/lib/CodeGen/CGBlocks.h @@ -17,7 +17,6 @@ #include "CodeGenTypes.h" #include "clang/AST/Type.h" #include "llvm/Module.h" -#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "clang/Basic/TargetInfo.h" #include "clang/AST/CharUnits.h" diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 419ed73..a9b0b64 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -81,10 +81,6 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF, Value *Args[2] = { CGF.EmitScalarExpr(E->getArg(0)), CGF.EmitScalarExpr(E->getArg(1)) }; Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2); - - if (Id == Intrinsic::atomic_load_nand) - Result = CGF.Builder.CreateNot(Result); - return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1])); } @@ -550,12 +546,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__sync_fetch_and_xor_8: case Builtin::BI__sync_fetch_and_xor_16: return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E); - case 
Builtin::BI__sync_fetch_and_nand_1: - case Builtin::BI__sync_fetch_and_nand_2: - case Builtin::BI__sync_fetch_and_nand_4: - case Builtin::BI__sync_fetch_and_nand_8: - case Builtin::BI__sync_fetch_and_nand_16: - return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E); // Clang extensions: not overloaded yet. case Builtin::BI__sync_fetch_and_min: @@ -602,13 +592,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, case Builtin::BI__sync_xor_and_fetch_16: return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E, llvm::Instruction::Xor); - case Builtin::BI__sync_nand_and_fetch_1: - case Builtin::BI__sync_nand_and_fetch_2: - case Builtin::BI__sync_nand_and_fetch_4: - case Builtin::BI__sync_nand_and_fetch_8: - case Builtin::BI__sync_nand_and_fetch_16: - return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E, - llvm::Instruction::And); case Builtin::BI__sync_val_compare_and_swap_1: case Builtin::BI__sync_val_compare_and_swap_2: diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp index b88001c..93a182f 100644 --- a/lib/CodeGen/CGCXX.cpp +++ b/lib/CodeGen/CGCXX.cpp @@ -297,311 +297,6 @@ void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name, getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer()); } -llvm::Constant * -CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD, - bool Extern, - const ThunkAdjustment &ThisAdjustment) { - return GenerateCovariantThunk(Fn, GD, Extern, - CovariantThunkAdjustment(ThisAdjustment, - ThunkAdjustment())); -} - -llvm::Value * -CodeGenFunction::DynamicTypeAdjust(llvm::Value *V, - const ThunkAdjustment &Adjustment) { - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext); - - const llvm::Type *OrigTy = V->getType(); - if (Adjustment.NonVirtual) { - // Do the non-virtual adjustment - V = Builder.CreateBitCast(V, Int8PtrTy); - V = Builder.CreateConstInBoundsGEP1_64(V, Adjustment.NonVirtual); - V = Builder.CreateBitCast(V, OrigTy); - } - - if 
(!Adjustment.Virtual) - return V; - - assert(Adjustment.Virtual % (LLVMPointerWidth / 8) == 0 && - "vtable entry unaligned"); - - // Do the virtual this adjustment - const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType()); - const llvm::Type *PtrDiffPtrTy = PtrDiffTy->getPointerTo(); - - llvm::Value *ThisVal = Builder.CreateBitCast(V, Int8PtrTy); - V = Builder.CreateBitCast(V, PtrDiffPtrTy->getPointerTo()); - V = Builder.CreateLoad(V, "vtable"); - - llvm::Value *VTablePtr = V; - uint64_t VirtualAdjustment = Adjustment.Virtual / (LLVMPointerWidth / 8); - V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment); - V = Builder.CreateLoad(V); - V = Builder.CreateGEP(ThisVal, V); - - return Builder.CreateBitCast(V, OrigTy); -} - -llvm::Constant * -CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn, - GlobalDecl GD, bool Extern, - const CovariantThunkAdjustment &Adjustment) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - QualType ResultType = FPT->getResultType(); - - FunctionArgList Args; - ImplicitParamDecl *ThisDecl = - ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, - MD->getThisType(getContext())); - Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType())); - for (FunctionDecl::param_const_iterator i = MD->param_begin(), - e = MD->param_end(); - i != e; ++i) { - ParmVarDecl *D = *i; - Args.push_back(std::make_pair(D, D->getType())); - } - IdentifierInfo *II - = &CGM.getContext().Idents.get("__thunk_named_foo_"); - FunctionDecl *FD = FunctionDecl::Create(getContext(), - getContext().getTranslationUnitDecl(), - SourceLocation(), II, ResultType, 0, - Extern - ? 
FunctionDecl::Extern - : FunctionDecl::Static, - false, true); - StartFunction(FD, ResultType, Fn, Args, SourceLocation()); - - // generate body - const llvm::Type *Ty = - CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), - FPT->isVariadic()); - llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty); - - CallArgList CallArgs; - - bool ShouldAdjustReturnPointer = true; - QualType ArgType = MD->getThisType(getContext()); - llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this"); - if (!Adjustment.ThisAdjustment.isEmpty()) { - // Do the this adjustment. - const llvm::Type *OrigTy = Callee->getType(); - Arg = DynamicTypeAdjust(Arg, Adjustment.ThisAdjustment); - - if (!Adjustment.ReturnAdjustment.isEmpty()) { - const CovariantThunkAdjustment &ReturnAdjustment = - CovariantThunkAdjustment(ThunkAdjustment(), - Adjustment.ReturnAdjustment); - - Callee = CGM.BuildCovariantThunk(GD, Extern, ReturnAdjustment); - - Callee = Builder.CreateBitCast(Callee, OrigTy); - ShouldAdjustReturnPointer = false; - } - } - - CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType)); - - for (FunctionDecl::param_const_iterator i = MD->param_begin(), - e = MD->param_end(); - i != e; ++i) { - ParmVarDecl *D = *i; - QualType ArgType = D->getType(); - - // llvm::Value *Arg = CGF.GetAddrOfLocalVar(Dst); - Expr *Arg = new (getContext()) DeclRefExpr(D, ArgType.getNonReferenceType(), - SourceLocation()); - CallArgs.push_back(std::make_pair(EmitCallArg(Arg, ArgType), ArgType)); - } - - RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs, - FPT->getCallConv(), - FPT->getNoReturnAttr()), - Callee, ReturnValueSlot(), CallArgs, MD); - if (ShouldAdjustReturnPointer && !Adjustment.ReturnAdjustment.isEmpty()) { - bool CanBeZero = !(ResultType->isReferenceType() - // FIXME: attr nonnull can't be zero either - /* || ResultType->hasAttr<NonNullAttr>() */ ); - // Do the return result adjustment. 
- if (CanBeZero) { - llvm::BasicBlock *NonZeroBlock = createBasicBlock(); - llvm::BasicBlock *ZeroBlock = createBasicBlock(); - llvm::BasicBlock *ContBlock = createBasicBlock(); - - const llvm::Type *Ty = RV.getScalarVal()->getType(); - llvm::Value *Zero = llvm::Constant::getNullValue(Ty); - Builder.CreateCondBr(Builder.CreateICmpNE(RV.getScalarVal(), Zero), - NonZeroBlock, ZeroBlock); - EmitBlock(NonZeroBlock); - llvm::Value *NZ = - DynamicTypeAdjust(RV.getScalarVal(), Adjustment.ReturnAdjustment); - EmitBranch(ContBlock); - EmitBlock(ZeroBlock); - llvm::Value *Z = RV.getScalarVal(); - EmitBlock(ContBlock); - llvm::PHINode *RVOrZero = Builder.CreatePHI(Ty); - RVOrZero->reserveOperandSpace(2); - RVOrZero->addIncoming(NZ, NonZeroBlock); - RVOrZero->addIncoming(Z, ZeroBlock); - RV = RValue::get(RVOrZero); - } else - RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), - Adjustment.ReturnAdjustment)); - } - - if (!ResultType->isVoidType()) - EmitReturnOfRValue(RV, ResultType); - - FinishFunction(); - return Fn; -} - -llvm::Constant * -CodeGenModule::GetAddrOfThunk(GlobalDecl GD, - const ThunkAdjustment &ThisAdjustment) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - - // Compute mangled name - llvm::SmallString<256> OutName; - if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD)) - getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), ThisAdjustment, - OutName); - else - getMangleContext().mangleThunk(MD, ThisAdjustment, OutName); - - // Get function for mangled name - const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD); - return GetOrCreateLLVMFunction(OutName, Ty, GlobalDecl()); -} - -llvm::Constant * -CodeGenModule::GetAddrOfCovariantThunk(GlobalDecl GD, - const CovariantThunkAdjustment &Adjustment) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - - // Compute mangled name - llvm::SmallString<256> Name; - getMangleContext().mangleCovariantThunk(MD, Adjustment, Name); - - // Get function for mangled 
name - const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD); - return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl()); -} - -void CodeGenModule::BuildThunksForVirtual(GlobalDecl GD) { - CGVtableInfo::AdjustmentVectorTy *AdjPtr = getVtableInfo().getAdjustments(GD); - if (!AdjPtr) - return; - CGVtableInfo::AdjustmentVectorTy &Adj = *AdjPtr; - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - for (unsigned i = 0; i < Adj.size(); i++) { - GlobalDecl OGD = Adj[i].first; - const CXXMethodDecl *OMD = cast<CXXMethodDecl>(OGD.getDecl()); - QualType nc_oret = OMD->getType()->getAs<FunctionType>()->getResultType(); - CanQualType oret = getContext().getCanonicalType(nc_oret); - QualType nc_ret = MD->getType()->getAs<FunctionType>()->getResultType(); - CanQualType ret = getContext().getCanonicalType(nc_ret); - ThunkAdjustment ReturnAdjustment; - if (oret != ret) { - QualType qD = nc_ret->getPointeeType(); - QualType qB = nc_oret->getPointeeType(); - CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl()); - CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl()); - ReturnAdjustment = ComputeThunkAdjustment(D, B); - } - ThunkAdjustment ThisAdjustment = Adj[i].second; - bool Extern = !cast<CXXRecordDecl>(OMD->getDeclContext())->isInAnonymousNamespace(); - if (!ReturnAdjustment.isEmpty() || !ThisAdjustment.isEmpty()) { - CovariantThunkAdjustment CoAdj(ThisAdjustment, ReturnAdjustment); - llvm::Constant *FnConst; - if (!ReturnAdjustment.isEmpty()) - FnConst = GetAddrOfCovariantThunk(GD, CoAdj); - else - FnConst = GetAddrOfThunk(GD, ThisAdjustment); - if (!isa<llvm::Function>(FnConst)) { - llvm::Constant *SubExpr = - cast<llvm::ConstantExpr>(FnConst)->getOperand(0); - llvm::Function *OldFn = cast<llvm::Function>(SubExpr); - llvm::Constant *NewFnConst; - if (!ReturnAdjustment.isEmpty()) - NewFnConst = GetAddrOfCovariantThunk(GD, CoAdj); - else - NewFnConst = GetAddrOfThunk(GD, ThisAdjustment); - llvm::Function *NewFn = 
cast<llvm::Function>(NewFnConst); - NewFn->takeName(OldFn); - llvm::Constant *NewPtrForOldDecl = - llvm::ConstantExpr::getBitCast(NewFn, OldFn->getType()); - OldFn->replaceAllUsesWith(NewPtrForOldDecl); - OldFn->eraseFromParent(); - FnConst = NewFn; - } - llvm::Function *Fn = cast<llvm::Function>(FnConst); - if (Fn->isDeclaration()) { - llvm::GlobalVariable::LinkageTypes linktype; - linktype = llvm::GlobalValue::WeakAnyLinkage; - if (!Extern) - linktype = llvm::GlobalValue::InternalLinkage; - Fn->setLinkage(linktype); - if (!Features.Exceptions && !Features.ObjCNonFragileABI) - Fn->addFnAttr(llvm::Attribute::NoUnwind); - Fn->setAlignment(2); - CodeGenFunction(*this).GenerateCovariantThunk(Fn, GD, Extern, CoAdj); - } - } - } -} - -llvm::Constant * -CodeGenModule::BuildThunk(GlobalDecl GD, bool Extern, - const ThunkAdjustment &ThisAdjustment) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - llvm::SmallString<256> OutName; - if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(MD)) { - getMangleContext().mangleCXXDtorThunk(D, GD.getDtorType(), ThisAdjustment, - OutName); - } else - getMangleContext().mangleThunk(MD, ThisAdjustment, OutName); - - llvm::GlobalVariable::LinkageTypes linktype; - linktype = llvm::GlobalValue::WeakAnyLinkage; - if (!Extern) - linktype = llvm::GlobalValue::InternalLinkage; - llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0); - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - const llvm::FunctionType *FTy = - getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), - FPT->isVariadic()); - - llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(), - &getModule()); - CodeGenFunction(*this).GenerateThunk(Fn, GD, Extern, ThisAdjustment); - llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty); - return m; -} - -llvm::Constant * -CodeGenModule::BuildCovariantThunk(const GlobalDecl &GD, bool Extern, - const CovariantThunkAdjustment 
&Adjustment) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - llvm::SmallString<256> OutName; - getMangleContext().mangleCovariantThunk(MD, Adjustment, OutName); - llvm::GlobalVariable::LinkageTypes linktype; - linktype = llvm::GlobalValue::WeakAnyLinkage; - if (!Extern) - linktype = llvm::GlobalValue::InternalLinkage; - llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0); - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - const llvm::FunctionType *FTy = - getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), - FPT->isVariadic()); - - llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(), - &getModule()); - CodeGenFunction(*this).GenerateCovariantThunk(Fn, MD, Extern, Adjustment); - llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty); - return m; -} - static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VtableIndex, llvm::Value *This, const llvm::Type *Ty) { Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo(); @@ -618,17 +313,17 @@ llvm::Value * CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This, const llvm::Type *Ty) { MD = MD->getCanonicalDecl(); - uint64_t VtableIndex = CGM.getVtableInfo().getMethodVtableIndex(MD); + uint64_t VTableIndex = CGM.getVTables().getMethodVtableIndex(MD); - return ::BuildVirtualCall(*this, VtableIndex, This, Ty); + return ::BuildVirtualCall(*this, VTableIndex, This, Ty); } llvm::Value * CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type, llvm::Value *&This, const llvm::Type *Ty) { DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl()); - uint64_t VtableIndex = - CGM.getVtableInfo().getMethodVtableIndex(GlobalDecl(DD, Type)); + uint64_t VTableIndex = + CGM.getVTables().getMethodVtableIndex(GlobalDecl(DD, Type)); - return ::BuildVirtualCall(*this, VtableIndex, This, Ty); + return ::BuildVirtualCall(*this, VTableIndex, This, Ty); } diff --git 
a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp index 072b1f6..cb1ecc1 100644 --- a/lib/CodeGen/CGCall.cpp +++ b/lib/CodeGen/CGCall.cpp @@ -67,8 +67,7 @@ const CGFunctionInfo & CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) { return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(), llvm::SmallVector<CanQualType, 16>(), - FTNP->getCallConv(), - FTNP->getNoReturnAttr()); + FTNP->getExtInfo()); } /// \param Args - contains any initial parameters besides those @@ -81,8 +80,7 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT, ArgTys.push_back(FTP->getArgType(i)); CanQualType ResTy = FTP->getResultType().getUnqualifiedType(); return CGT.getFunctionInfo(ResTy, ArgTys, - FTP->getCallConv(), - FTP->getNoReturnAttr()); + FTP->getExtInfo()); } const CGFunctionInfo & @@ -175,8 +173,10 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) { } return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, - getCallingConventionForDecl(MD), - /*NoReturn*/ false); + FunctionType::ExtInfo( + /*NoReturn*/ false, + /*RegParm*/ 0, + getCallingConventionForDecl(MD))); } const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) { @@ -194,43 +194,40 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) { const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, const CallArgList &Args, - CallingConv CC, - bool NoReturn) { + const FunctionType::ExtInfo &Info) { // FIXME: Kill copy. 
llvm::SmallVector<CanQualType, 16> ArgTys; for (CallArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; ++i) ArgTys.push_back(Context.getCanonicalParamType(i->second)); - return getFunctionInfo(GetReturnType(ResTy), ArgTys, CC, NoReturn); + return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info); } const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, const FunctionArgList &Args, - CallingConv CC, - bool NoReturn) { + const FunctionType::ExtInfo &Info) { // FIXME: Kill copy. llvm::SmallVector<CanQualType, 16> ArgTys; for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e; ++i) ArgTys.push_back(Context.getCanonicalParamType(i->second)); - return getFunctionInfo(GetReturnType(ResTy), ArgTys, CC, NoReturn); + return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info); } const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys, - CallingConv CallConv, - bool NoReturn) { + const FunctionType::ExtInfo &Info) { #ifndef NDEBUG for (llvm::SmallVectorImpl<CanQualType>::const_iterator I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) assert(I->isCanonicalAsParam()); #endif - unsigned CC = ClangCallConvToLLVMCallConv(CallConv); + unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC()); // Lookup or create unique function info. llvm::FoldingSetNodeID ID; - CGFunctionInfo::Profile(ID, CC, NoReturn, ResTy, + CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end()); void *InsertPos = 0; @@ -239,7 +236,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, return *FI; // Construct the function info. - FI = new CGFunctionInfo(CC, NoReturn, ResTy, ArgTys); + FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, ArgTys); FunctionInfos.InsertNode(FI, InsertPos); // Compute ABI information. 
@@ -250,11 +247,12 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention, bool _NoReturn, + unsigned _RegParm, CanQualType ResTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys) : CallingConvention(_CallingConvention), EffectiveCallingConvention(_CallingConvention), - NoReturn(_NoReturn) + NoReturn(_NoReturn), RegParm(_RegParm) { NumArgs = ArgTys.size(); Args = new ArgInfo[1 + NumArgs]; @@ -610,11 +608,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, // FIXME: we need to honour command line settings also... // FIXME: RegParm should be reduced in case of nested functions and/or global // register variable. - signed RegParm = 0; - if (TargetDecl) - if (const RegparmAttr *RegParmAttr - = TargetDecl->getAttr<RegparmAttr>()) - RegParm = RegParmAttr->getNumParams(); + signed RegParm = FI.getRegParm(); unsigned PointerWidth = getContext().Target.getPointerWidth(0); for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), @@ -623,8 +617,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, const ABIArgInfo &AI = it->info; unsigned Attributes = 0; - if (ParamType.isRestrictQualified()) - Attributes |= llvm::Attribute::NoAlias; + // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we + // have the corresponding parameter variable. It doesn't make + // sense to do it here because parameters are so fucked up. switch (AI.getKind()) { case ABIArgInfo::Coerce: @@ -749,6 +744,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, V = CreateMemTemp(Ty); Builder.CreateStore(AI, V); } else { + if (Arg->getType().isRestrictQualified()) + AI->addAttr(llvm::Attribute::NoAlias); + if (!getContext().typesAreCompatible(Ty, Arg->getType())) { // This must be a promotion, for something like // "void a(x) short x; {..." 
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h index 3d81165..31c8aac 100644 --- a/lib/CodeGen/CGCall.h +++ b/lib/CodeGen/CGCall.h @@ -76,12 +76,16 @@ namespace CodeGen { unsigned NumArgs; ArgInfo *Args; + /// How many arguments to pass inreg. + unsigned RegParm; + public: typedef const ArgInfo *const_arg_iterator; typedef ArgInfo *arg_iterator; CGFunctionInfo(unsigned CallingConvention, bool NoReturn, + unsigned RegParm, CanQualType ResTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys); ~CGFunctionInfo() { delete[] Args; } @@ -108,6 +112,8 @@ namespace CodeGen { EffectiveCallingConvention = Value; } + unsigned getRegParm() const { return RegParm; } + CanQualType getReturnType() const { return Args[0].type; } ABIArgInfo &getReturnInfo() { return Args[0].info; } @@ -116,19 +122,20 @@ namespace CodeGen { void Profile(llvm::FoldingSetNodeID &ID) { ID.AddInteger(getCallingConvention()); ID.AddBoolean(NoReturn); + ID.AddInteger(RegParm); getReturnType().Profile(ID); for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it) it->type.Profile(ID); } template<class Iterator> static void Profile(llvm::FoldingSetNodeID &ID, - unsigned CallingConvention, - bool NoReturn, + const FunctionType::ExtInfo &Info, CanQualType ResTy, Iterator begin, Iterator end) { - ID.AddInteger(CallingConvention); - ID.AddBoolean(NoReturn); + ID.AddInteger(Info.getCC()); + ID.AddBoolean(Info.getNoReturn()); + ID.AddInteger(Info.getRegParm()); ResTy.Profile(ID); for (; begin != end; ++begin) { CanQualType T = *begin; // force iterator to be over canonical types diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp index 525e858..177e862 100644 --- a/lib/CodeGen/CGClass.cpp +++ b/lib/CodeGen/CGClass.cpp @@ -69,42 +69,6 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *Class, return llvm::ConstantInt::get(PtrDiffTy, Offset); } -// FIXME: This probably belongs in CGVtable, but it relies on -// the static function ComputeNonVirtualBaseClassOffset, so we 
should make that -// a CodeGenModule member function as well. -ThunkAdjustment -CodeGenModule::ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl, - const CXXRecordDecl *BaseClassDecl) { - CXXBasePaths Paths(/*FindAmbiguities=*/false, - /*RecordPaths=*/true, /*DetectVirtual=*/false); - if (!const_cast<CXXRecordDecl *>(ClassDecl)-> - isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClassDecl), Paths)) { - assert(false && "Class must be derived from the passed in base class!"); - return ThunkAdjustment(); - } - - unsigned Start = 0; - uint64_t VirtualOffset = 0; - - const CXXBasePath &Path = Paths.front(); - const CXXRecordDecl *VBase = 0; - for (unsigned i = 0, e = Path.size(); i != e; ++i) { - const CXXBasePathElement& Element = Path[i]; - if (Element.Base->isVirtual()) { - Start = i+1; - QualType VBaseType = Element.Base->getType(); - VBase = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl()); - } - } - if (VBase) - VirtualOffset = - getVtableInfo().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl); - - uint64_t Offset = - ComputeNonVirtualBaseClassOffset(getContext(), Paths.front(), Start); - return ThunkAdjustment(Offset, VirtualOffset); -} - /// Gets the address of a virtual base class within a complete object. /// This should only be used for (1) non-virtual bases or (2) virtual bases /// when the type is known to be complete (e.g. in complete destructors). @@ -139,7 +103,7 @@ CodeGenFunction::GetAddressOfBaseOfCompleteClass(llvm::Value *This, V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo()); return V; -} +} llvm::Value * CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value, @@ -308,6 +272,53 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, return Value; } +/// EmitCopyCtorCall - Emit a call to a copy constructor. 
+static void +EmitCopyCtorCall(CodeGenFunction &CGF, + const CXXConstructorDecl *CopyCtor, CXXCtorType CopyCtorType, + llvm::Value *ThisPtr, llvm::Value *VTT, llvm::Value *Src) { + llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, CopyCtorType); + + CallArgList CallArgs; + + // Push the this ptr. + CallArgs.push_back(std::make_pair(RValue::get(ThisPtr), + CopyCtor->getThisType(CGF.getContext()))); + + // Push the VTT parameter if necessary. + if (VTT) { + QualType T = CGF.getContext().getPointerType(CGF.getContext().VoidPtrTy); + CallArgs.push_back(std::make_pair(RValue::get(VTT), T)); + } + + // Push the Src ptr. + CallArgs.push_back(std::make_pair(RValue::get(Src), + CopyCtor->getParamDecl(0)->getType())); + + + { + CodeGenFunction::CXXTemporariesCleanupScope Scope(CGF); + + // If the copy constructor has default arguments, emit them. + for (unsigned I = 1, E = CopyCtor->getNumParams(); I < E; ++I) { + const ParmVarDecl *Param = CopyCtor->getParamDecl(I); + const Expr *DefaultArgExpr = Param->getDefaultArg(); + + assert(DefaultArgExpr && "Ctor parameter must have default arg!"); + + QualType ArgType = Param->getType(); + CallArgs.push_back(std::make_pair(CGF.EmitCallArg(DefaultArgExpr, + ArgType), + ArgType)); + } + + const FunctionProtoType *FPT = + CopyCtor->getType()->getAs<FunctionProtoType>(); + CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT), + Callee, ReturnValueSlot(), CallArgs, CopyCtor); + } +} + /// EmitClassAggrMemberwiseCopy - This routine generates code to copy a class /// array of objects from SrcValue to DestValue. Copying can be either a bitwise /// copy or via a copy constructor call. 
@@ -354,22 +365,9 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest, if (BitwiseCopy) EmitAggregateCopy(Dest, Src, Ty); else if (CXXConstructorDecl *BaseCopyCtor = - BaseClassDecl->getCopyConstructor(getContext(), 0)) { - llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor, - Ctor_Complete); - CallArgList CallArgs; - // Push the this (Dest) ptr. - CallArgs.push_back(std::make_pair(RValue::get(Dest), - BaseCopyCtor->getThisType(getContext()))); + BaseClassDecl->getCopyConstructor(getContext(), 0)) + EmitCopyCtorCall(*this, BaseCopyCtor, Ctor_Complete, Dest, 0, Src); - // Push the Src ptr. - CallArgs.push_back(std::make_pair(RValue::get(Src), - BaseCopyCtor->getParamDecl(0)->getType())); - const FunctionProtoType *FPT - = BaseCopyCtor->getType()->getAs<FunctionProtoType>(); - EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT), - Callee, ReturnValueSlot(), CallArgs, BaseCopyCtor); - } EmitBlock(ContinueBlock); // Emit the increment of the loop counter. @@ -471,7 +469,7 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest, /// GetVTTParameter - Return the VTT parameter that should be passed to a /// base constructor/destructor with virtual bases. static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) { - if (!CGVtableInfo::needsVTTParameter(GD)) { + if (!CodeGenVTables::needsVTTParameter(GD)) { // This constructor/destructor does not need a VTT parameter. return 0; } @@ -486,21 +484,21 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) { // If the record matches the base, this is the complete ctor/dtor // variant calling the base variant in a class with virtual bases. 
if (RD == Base) { - assert(!CGVtableInfo::needsVTTParameter(CGF.CurGD) && + assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) && "doing no-op VTT offset in base dtor/ctor?"); SubVTTIndex = 0; } else { - SubVTTIndex = CGF.CGM.getVtableInfo().getSubVTTIndex(RD, Base); + SubVTTIndex = CGF.CGM.getVTables().getSubVTTIndex(RD, Base); assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); } - if (CGVtableInfo::needsVTTParameter(CGF.CurGD)) { + if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) { // A VTT parameter was passed to the constructor, use it. VTT = CGF.LoadCXXVTT(); VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex); } else { // We're the complete constructor, so get the VTT by name. - VTT = CGF.CGM.getVtableInfo().getVTT(RD); + VTT = CGF.CGM.getVTables().getVTT(RD); VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex); } @@ -531,29 +529,13 @@ void CodeGenFunction::EmitClassMemberwiseCopy( return; } - if (CXXConstructorDecl *BaseCopyCtor = - BaseClassDecl->getCopyConstructor(getContext(), 0)) { - llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor, CtorType); - CallArgList CallArgs; - // Push the this (Dest) ptr. - CallArgs.push_back(std::make_pair(RValue::get(Dest), - BaseCopyCtor->getThisType(getContext()))); - - // Push the VTT parameter, if necessary. - if (llvm::Value *VTT = - GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType))) { - QualType T = getContext().getPointerType(getContext().VoidPtrTy); - CallArgs.push_back(std::make_pair(RValue::get(VTT), T)); - } + CXXConstructorDecl *BaseCopyCtor = + BaseClassDecl->getCopyConstructor(getContext(), 0); + if (!BaseCopyCtor) + return; - // Push the Src ptr. 
- CallArgs.push_back(std::make_pair(RValue::get(Src), - BaseCopyCtor->getParamDecl(0)->getType())); - const FunctionProtoType *FPT = - BaseCopyCtor->getType()->getAs<FunctionProtoType>(); - EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT), - Callee, ReturnValueSlot(), CallArgs, BaseCopyCtor); - } + llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType)); + EmitCopyCtorCall(*this, BaseCopyCtor, CtorType, Dest, VTT, Src); } /// EmitClassCopyAssignment - This routine generates code to copy assign a class @@ -690,7 +672,7 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) { } } - InitializeVtablePtrs(ClassDecl); + InitializeVTablePointers(ClassDecl); } /// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator. @@ -1010,7 +992,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, MemberInitializers.push_back(Member); } - InitializeVtablePtrs(ClassDecl); + InitializeVTablePointers(ClassDecl); for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) { assert(LiveTemporaries.empty() && @@ -1060,7 +1042,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { // Otherwise, we're in the base variant, so we need to ensure the // vtable ptrs are right before emitting the body. } else { - InitializeVtablePtrs(Dtor->getParent()); + InitializeVTablePointers(Dtor->getParent()); } // Emit the body of the statement. @@ -1286,14 +1268,12 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, // before the construction of the next array element, if any. // Keep track of the current number of live temporaries. - unsigned OldNumLiveTemporaries = LiveTemporaries.size(); + { + CXXTemporariesCleanupScope Scope(*this); - EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd); + EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd); + } - // Pop temporaries. 
- while (LiveTemporaries.size() > OldNumLiveTemporaries) - PopCXXTemporary(); - EmitBlock(ContinueBlock); // Emit the increment of the loop counter. @@ -1399,7 +1379,7 @@ CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D, llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount); QualType R = getContext().VoidTy; const CGFunctionInfo &FI - = CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false); + = CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo()); const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false); llvm::Function *Fn = llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage, @@ -1474,7 +1454,7 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy); DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP)); - if (CGVtableInfo::needsVTTParameter(CurGD)) { + if (CodeGenVTables::needsVTTParameter(CurGD)) { assert(I != E && "cannot skip vtt parameter, already done with args"); assert(I->second == VoidPP && "skipping parameter not of vtt type"); ++I; @@ -1541,7 +1521,7 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This, VTablePtr = Builder.CreateLoad(VTablePtr, "vtable"); int64_t VBaseOffsetOffset = - CGM.getVtableInfo().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl); + CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl); llvm::Value *VBaseOffsetPtr = Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset, "vbase.offset.ptr"); @@ -1556,69 +1536,126 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This, return VBaseOffset; } -void CodeGenFunction::InitializeVtablePtrs(const CXXRecordDecl *ClassDecl) { - if (!ClassDecl->isDynamicClass()) - return; +void +CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, + bool BaseIsMorallyVirtual, + llvm::Constant *VTable, + const CXXRecordDecl *VTableClass) { + const CXXRecordDecl 
*RD = Base.getBase(); + + // Compute the address point. + llvm::Value *VTableAddressPoint; + + // Check if we need to use a vtable from the VTT. + if (CodeGenVTables::needsVTTParameter(CurGD) && + (RD->getNumVBases() || BaseIsMorallyVirtual)) { + // Get the secondary vpointer index. + uint64_t VirtualPointerIndex = + CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base); + + /// Load the VTT. + llvm::Value *VTT = LoadCXXVTT(); + if (VirtualPointerIndex) + VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex); - llvm::Constant *Vtable = CGM.getVtableInfo().getVtable(ClassDecl); - CGVtableInfo::AddrSubMap_t& AddressPoints = - *(*CGM.getVtableInfo().AddressPoints[ClassDecl])[ClassDecl]; - llvm::Value *ThisPtr = LoadCXXThis(); - const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl); + // And load the address point from the VTT. + VTableAddressPoint = Builder.CreateLoad(VTT); + } else { + uint64_t AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass); + VTableAddressPoint = + Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint); + } - // Store address points for virtual bases - for (CXXRecordDecl::base_class_const_iterator I = - ClassDecl->vbases_begin(), E = ClassDecl->vbases_end(); I != E; ++I) { - const CXXBaseSpecifier &Base = *I; - CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); - uint64_t Offset = Layout.getVBaseClassOffset(BaseClassDecl); - InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints, - ThisPtr, Offset); + // Compute where to store the address point. + llvm::Value *VTableField; + + if (CodeGenVTables::needsVTTParameter(CurGD) && BaseIsMorallyVirtual) { + // We need to use the virtual base offset offset because the virtual base + // might have a different offset in the most derived class. 
+ VTableField = GetAddressOfBaseClass(LoadCXXThis(), VTableClass, RD, + /*NullCheckValue=*/false); + } else { + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); + + VTableField = Builder.CreateBitCast(LoadCXXThis(), Int8PtrTy); + VTableField = + Builder.CreateConstInBoundsGEP1_64(VTableField, Base.getBaseOffset() / 8); } - // Store address points for non-virtual bases and current class - InitializeVtablePtrsRecursive(ClassDecl, Vtable, AddressPoints, ThisPtr, 0); + // Finally, store the address point. + const llvm::Type *AddressPointPtrTy = + VTableAddressPoint->getType()->getPointerTo(); + VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy); + Builder.CreateStore(VTableAddressPoint, VTableField); } -void CodeGenFunction::InitializeVtablePtrsRecursive( - const CXXRecordDecl *ClassDecl, - llvm::Constant *Vtable, - CGVtableInfo::AddrSubMap_t& AddressPoints, - llvm::Value *ThisPtr, - uint64_t Offset) { - if (!ClassDecl->isDynamicClass()) - return; +void +CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, + bool BaseIsMorallyVirtual, + bool BaseIsNonVirtualPrimaryBase, + llvm::Constant *VTable, + const CXXRecordDecl *VTableClass, + VisitedVirtualBasesSetTy& VBases) { + // If this base is a non-virtual primary base the address point has already + // been set. + if (!BaseIsNonVirtualPrimaryBase) { + // Initialize the vtable pointer for this base. + InitializeVTablePointer(Base, BaseIsMorallyVirtual, VTable, VTableClass); + } + + const CXXRecordDecl *RD = Base.getBase(); - // Store address points for non-virtual bases - const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl); - for (CXXRecordDecl::base_class_const_iterator I = - ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) { - const CXXBaseSpecifier &Base = *I; - if (Base.isVirtual()) + // Traverse bases. 
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + CXXRecordDecl *BaseDecl + = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + // Ignore classes without a vtable. + if (!BaseDecl->isDynamicClass()) continue; - CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); - uint64_t NewOffset = Offset + Layout.getBaseClassOffset(BaseClassDecl); - InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints, - ThisPtr, NewOffset); + + uint64_t BaseOffset; + bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual; + bool BaseDeclIsNonVirtualPrimaryBase; + + if (I->isVirtual()) { + // Check if we've visited this virtual base before. + if (!VBases.insert(BaseDecl)) + continue; + + const ASTRecordLayout &Layout = + getContext().getASTRecordLayout(VTableClass); + + BaseOffset = Layout.getVBaseClassOffset(BaseDecl); + BaseDeclIsMorallyVirtual = true; + BaseDeclIsNonVirtualPrimaryBase = false; + } else { + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); + + BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); + BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl; + } + + InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), + BaseDeclIsMorallyVirtual, + BaseDeclIsNonVirtualPrimaryBase, + VTable, VTableClass, VBases); } +} - // Compute the address point - assert(AddressPoints.count(std::make_pair(ClassDecl, Offset)) && - "Missing address point for class"); - uint64_t AddressPoint = AddressPoints[std::make_pair(ClassDecl, Offset)]; - llvm::Value *VtableAddressPoint = - Builder.CreateConstInBoundsGEP2_64(Vtable, 0, AddressPoint); - - // Compute the address to store the address point - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); - llvm::Value *VtableField = Builder.CreateBitCast(ThisPtr, Int8PtrTy); - VtableField = 
Builder.CreateConstInBoundsGEP1_64(VtableField, Offset/8); - const llvm::Type *AddressPointPtrTy = - VtableAddressPoint->getType()->getPointerTo(); - VtableField = Builder.CreateBitCast(VtableField, AddressPointPtrTy); +void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) { + // Ignore classes without a vtable. + if (!RD->isDynamicClass()) + return; + + // Get the VTable. + llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD); - // Store address point - Builder.CreateStore(VtableAddressPoint, VtableField); + // Initialize the vtable pointers for this class and all of its bases. + VisitedVirtualBasesSetTy VBases; + InitializeVTablePointers(BaseSubobject(RD, 0), + /*BaseIsMorallyVirtual=*/false, + /*BaseIsNonVirtualPrimaryBase=*/false, + VTable, RD, VBases); } diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp index ad97d08..58acd3c 100644 --- a/lib/CodeGen/CGDebugInfo.cpp +++ b/lib/CodeGen/CGDebugInfo.cpp @@ -88,17 +88,35 @@ llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) { /// getOrCreateFile - Get the file debug info descriptor for the input location. llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) { - if (!Loc.isValid()) + if (!Loc.isValid()) // If Location is not valid then use main input file. return DebugFactory.CreateFile(TheCU.getFilename(), TheCU.getDirectory(), TheCU); SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc); + + // Cache the results. + const char *fname = PLoc.getFilename(); + llvm::DenseMap<const char *, llvm::WeakVH>::iterator it = + DIFileCache.find(fname); + + if (it != DIFileCache.end()) { + // Verify that the information still exists. + if (&*it->second) + return llvm::DIFile(cast<llvm::MDNode>(it->second)); + } + + // FIXME: We shouldn't even need to call 'makeAbsolute()' in the cases + // where we can consult the FileEntry. 
llvm::sys::Path AbsFileName(PLoc.getFilename()); AbsFileName.makeAbsolute(); - return DebugFactory.CreateFile(AbsFileName.getLast(), - AbsFileName.getDirname(), TheCU); + llvm::DIFile F = DebugFactory.CreateFile(AbsFileName.getLast(), + AbsFileName.getDirname(), TheCU); + + DIFileCache[fname] = F.getNode(); + return F; + } /// CreateCompileUnit - Create new compile unit. void CGDebugInfo::CreateCompileUnit() { @@ -112,6 +130,10 @@ void CGDebugInfo::CreateCompileUnit() { llvm::sys::Path AbsFileName(MainFileName); AbsFileName.makeAbsolute(); + // The main file name provided via the "-main-file-name" option contains just + // the file name itself with no path information. This file name may have had + // a relative path, so we look into the actual file entry for the main + // file to determine the real absolute path for the file. std::string MainFileDir; if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) MainFileDir = MainFile->getDir()->getName(); @@ -604,7 +626,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, // It doesn't make sense to give a virtual destructor a vtable index, // since a single destructor has two entries in the vtable. if (!isa<CXXDestructorDecl>(Method)) - VIndex = CGM.getVtableInfo().getMethodVtableIndex(Method); + VIndex = CGM.getVTables().getMethodVtableIndex(Method); ContainingType = RecordTy; } @@ -662,7 +684,7 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit, if (BI->isVirtual()) { // virtual base offset offset is -ve. The code generator emits dwarf // expression where it expects +ve number. 
- BaseOffset = 0 - CGM.getVtableInfo().getVirtualBaseOffsetOffset(RD, Base); + BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base); BFlags = llvm::DIType::FlagVirtual; } else BaseOffset = RL.getBaseClassOffset(Base); @@ -692,10 +714,8 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) { ASTContext &Context = CGM.getContext(); /* Function type */ - llvm::SmallVector<llvm::DIDescriptor, 16> STys; - STys.push_back(getOrCreateType(Context.IntTy, Unit)); - llvm::DIArray SElements = - DebugFactory.GetOrCreateArray(STys.data(), STys.size()); + llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit); + llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1); llvm::DIType SubTy = DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type, Unit, "", Unit, @@ -1048,11 +1068,9 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, uint64_t NumElems = Ty->getNumElements(); if (NumElems > 0) --NumElems; - llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts; - Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, NumElems)); - llvm::DIArray SubscriptArray = - DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size()); + llvm::DIDescriptor Subscript = DebugFactory.GetOrCreateSubrange(0, NumElems); + llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(&Subscript, 1); uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); @@ -1208,7 +1226,7 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, Ty = UnwrapTypeForDebugInfo(Ty); // Check for existing entry. - std::map<void *, llvm::WeakVH>::iterator it = + llvm::DenseMap<void *, llvm::WeakVH>::iterator it = TypeCache.find(Ty.getAsOpaquePtr()); if (it != TypeCache.end()) { // Verify that the debug info still exists. 
@@ -1371,13 +1389,10 @@ void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) { llvm::DIFile Unit = getOrCreateFile(CurLoc); PresumedLoc PLoc = SM.getPresumedLoc(CurLoc); - llvm::DIDescriptor DR(RegionStack.back()); - llvm::DIScope DS = llvm::DIScope(DR.getNode()); - llvm::DILocation DO(NULL); - llvm::DILocation DL = - DebugFactory.CreateLocation(PLoc.getLine(), PLoc.getColumn(), - DS, DO); - Builder.SetCurrentDebugLocation(DL.getNode()); + llvm::MDNode *Scope = RegionStack.back(); + Builder.SetCurrentDebugLocation(llvm::NewDebugLoc::get(PLoc.getLine(), + PLoc.getColumn(), + Scope)); } /// EmitRegionStart- Constructs the debug code for entering a declarative @@ -1580,11 +1595,8 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag, llvm::Instruction *Call = DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock()); - llvm::DIScope DS(RegionStack.back()); - llvm::DILocation DO(NULL); - llvm::DILocation DL = DebugFactory.CreateLocation(Line, Column, DS, DO); - - Call->setMetadata("dbg", DL.getNode()); + llvm::MDNode *Scope = RegionStack.back(); + Call->setDebugLoc(llvm::NewDebugLoc::get(Line, Column, Scope)); } /// EmitDeclare - Emit local variable declaration debug info. @@ -1646,13 +1658,9 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, // Insert an llvm.dbg.declare into the current block. 
llvm::Instruction *Call = DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock()); - - llvm::DIScope DS(RegionStack.back()); - llvm::DILocation DO(NULL); - llvm::DILocation DL = - DebugFactory.CreateLocation(Line, PLoc.getColumn(), DS, DO); - Call->setMetadata("dbg", DL.getNode()); + llvm::MDNode *Scope = RegionStack.back(); + Call->setDebugLoc(llvm::NewDebugLoc::get(Line, PLoc.getColumn(), Scope)); } void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h index 47a4620..8397245 100644 --- a/lib/CodeGen/CGDebugInfo.h +++ b/lib/CodeGen/CGDebugInfo.h @@ -21,7 +21,6 @@ #include "llvm/Analysis/DebugInfo.h" #include "llvm/Support/ValueHandle.h" #include "llvm/Support/Allocator.h" -#include <map> #include "CGBuilder.h" @@ -52,8 +51,7 @@ class CGDebugInfo { unsigned FwdDeclCount; /// TypeCache - Cache of previously constructed Types. - // FIXME: Eliminate this map. Be careful of iterator invalidation. - std::map<void *, llvm::WeakVH> TypeCache; + llvm::DenseMap<void *, llvm::WeakVH> TypeCache; bool BlockLiteralGenericSet; llvm::DIType BlockLiteralGeneric; @@ -65,6 +63,7 @@ class CGDebugInfo { /// constructed on demand. For example, C++ destructors, C++ operators etc.. 
llvm::BumpPtrAllocator DebugInfoNames; + llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache; llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache; llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache; diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index dc9ecd6..87ec159 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -14,6 +14,7 @@ #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "CGCall.h" +#include "CGRecordLayout.h" #include "CGObjCRuntime.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" @@ -1468,7 +1469,9 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, const FieldDecl* Field, unsigned CVRQualifiers) { - CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field); + const CGRecordLayout &RL = + CGM.getTypes().getCGRecordLayout(Field->getParent()); + const CGRecordLayout::BitFieldInfo &Info = RL.getBitFieldInfo(Field); // FIXME: CodeGenTypes should expose a method to get the appropriate type for // FieldTy (the appropriate type is ABI-dependent). @@ -1496,7 +1499,9 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, if (Field->isBitField()) return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers); - unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); + const CGRecordLayout &RL = + CGM.getTypes().getCGRecordLayout(Field->getParent()); + unsigned idx = RL.getLLVMFieldNo(Field); llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); // Match union field type. 
@@ -1531,7 +1536,9 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue, if (!FieldType->isReferenceType()) return EmitLValueForField(BaseValue, Field, CVRQualifiers); - unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); + const CGRecordLayout &RL = + CGM.getTypes().getCGRecordLayout(Field->getParent()); + unsigned idx = RL.getLLVMFieldNo(Field); llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); @@ -1637,6 +1644,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { case CastExpr::CK_AnyPointerToObjCPointerCast: return EmitLValue(E->getSubExpr()); + case CastExpr::CK_UncheckedDerivedToBase: case CastExpr::CK_DerivedToBase: { const RecordType *DerivedClassTy = E->getSubExpr()->getType()->getAs<RecordType>(); @@ -1872,7 +1880,6 @@ LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) { LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { // Can only get l-value for message expression returning aggregate type RValue RV = EmitAnyExprToTemp(E); - // FIXME: can this be volatile? return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType())); } diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp index 4847ca3..e2e2cd0 100644 --- a/lib/CodeGen/CGExprAgg.cpp +++ b/lib/CodeGen/CGExprAgg.cpp @@ -333,8 +333,7 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) { llvm::Value *FuncPtr; if (MD->isVirtual()) { - int64_t Index = - CGF.CGM.getVtableInfo().getMethodVtableIndex(MD); + int64_t Index = CGF.CGM.getVTables().getMethodVtableIndex(MD); // Itanium C++ ABI 2.3: // For a non-virtual function, this field is a simple function pointer. @@ -500,10 +499,6 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { llvm::Value *Val = DestPtr; - if (!Val) { - // Create a temporary variable. 
- Val = CGF.CreateMemTemp(E->getType(), "tmp"); - } CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer); } diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp index 0328621..d9585c9 100644 --- a/lib/CodeGen/CGExprCXX.cpp +++ b/lib/CodeGen/CGExprCXX.cpp @@ -44,9 +44,8 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD, QualType ResultType = FPT->getResultType(); return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args, - FPT->getCallConv(), - FPT->getNoReturnAttr()), Callee, - ReturnValue, Args, MD); + FPT->getExtInfo()), + Callee, ReturnValue, Args, MD); } /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given @@ -411,7 +410,8 @@ static CharUnits CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) { return CalculateCookiePadding(Ctx, E->getAllocatedType()); } -static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, +static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context, + CodeGenFunction &CGF, const CXXNewExpr *E, llvm::Value *& NumElements) { QualType Type = E->getAllocatedType(); @@ -432,6 +432,15 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, NumElements = llvm::ConstantInt::get(SizeTy, Result.Val.getInt().getZExtValue()); + while (const ArrayType *AType = Context.getAsArrayType(Type)) { + const llvm::ArrayType *llvmAType = + cast<llvm::ArrayType>(CGF.ConvertType(Type)); + NumElements = + CGF.Builder.CreateMul(NumElements, + llvm::ConstantInt::get( + SizeTy, llvmAType->getNumElements())); + Type = AType->getElementType(); + } return llvm::ConstantInt::get(SizeTy, AllocSize.getQuantity()); } @@ -444,6 +453,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, CGF.Builder.CreateMul(NumElements, llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity())); + + while (const ArrayType *AType = Context.getAsArrayType(Type)) { + const llvm::ArrayType *llvmAType = + cast<llvm::ArrayType>(CGF.ConvertType(Type)); + NumElements = + 
CGF.Builder.CreateMul(NumElements, + llvm::ConstantInt::get( + SizeTy, llvmAType->getNumElements())); + Type = AType->getElementType(); + } // And add the cookie padding if necessary. if (!CookiePadding.isZero()) @@ -504,7 +523,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { QualType SizeTy = getContext().getSizeType(); llvm::Value *NumElements = 0; - llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements); + llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(), + *this, E, NumElements); NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy)); @@ -590,10 +610,20 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { CookiePadding.getQuantity()); } - NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType())); - - EmitNewInitializer(*this, E, NewPtr, NumElements); - + if (AllocType->isArrayType()) { + while (const ArrayType *AType = getContext().getAsArrayType(AllocType)) + AllocType = AType->getElementType(); + NewPtr = + Builder.CreateBitCast(NewPtr, + ConvertType(getContext().getPointerType(AllocType))); + EmitNewInitializer(*this, E, NewPtr, NumElements); + NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType())); + } + else { + NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType())); + EmitNewInitializer(*this, E, NewPtr, NumElements); + } + if (NullCheckResult) { Builder.CreateBr(NewEnd); NewNotNull = Builder.GetInsertBlock(); diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp index 5915340..0a0c914 100644 --- a/lib/CodeGen/CGExprComplex.cpp +++ b/lib/CodeGen/CGExprComplex.cpp @@ -522,13 +522,21 @@ EmitCompoundAssign(const CompoundAssignOperator *E, // scalar. OpInfo.Ty = E->getComputationResultType(); OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty); - + LValue LHSLV = CGF.EmitLValue(E->getLHS()); - - // We know the LHS is a complex lvalue. 
- OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified()); - OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty); + ComplexPairTy LHSComplexPair; + if (LHSLV.isPropertyRef()) + LHSComplexPair = + CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal(); + else if (LHSLV.isKVCRef()) + LHSComplexPair = + CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal(); + else + LHSComplexPair = EmitLoadOfComplex(LHSLV.getAddress(), + LHSLV.isVolatileQualified()); + + OpInfo.LHS=EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty); // Expand the binary operator. ComplexPairTy Result = (this->*Func)(OpInfo); @@ -537,12 +545,22 @@ EmitCompoundAssign(const CompoundAssignOperator *E, Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy); // Store the result value into the LHS lvalue. - EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified()); + if (LHSLV.isPropertyRef()) + CGF.EmitObjCPropertySet(LHSLV.getPropertyRefExpr(), + RValue::getComplex(Result)); + else if (LHSLV.isKVCRef()) + CGF.EmitObjCPropertySet(LHSLV.getKVCRefExpr(), RValue::getComplex(Result)); + else + EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified()); // And now return the LHS IgnoreReal = ignreal; IgnoreImag = ignimag; IgnoreRealAssign = ignreal; IgnoreImagAssign = ignimag; + if (LHSLV.isPropertyRef()) + return CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal(); + else if (LHSLV.isKVCRef()) + return CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal(); return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified()); } diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp index f0d82a8..172a77d 100644 --- a/lib/CodeGen/CGExprConstant.cpp +++ b/lib/CodeGen/CGExprConstant.cpp @@ -14,6 +14,7 @@ #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "CGObjCRuntime.h" +#include "CGRecordLayout.h" #include "clang/AST/APValue.h" #include 
"clang/AST/ASTContext.h" #include "clang/AST/RecordLayout.h" @@ -417,7 +418,7 @@ public: // Get the function pointer (or index if this is a virtual function). if (MD->isVirtual()) { - uint64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD); + uint64_t Index = CGM.getVTables().getMethodVtableIndex(MD); // Itanium C++ ABI 2.3: // For a non-virtual function, this field is a simple function pointer. @@ -1011,7 +1012,9 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { E = RD->field_end(); I != E; ++I) { const FieldDecl *FD = *I; - unsigned FieldNo = getTypes().getLLVMFieldNo(FD); + const CGRecordLayout &RL = + getTypes().getCGRecordLayout(FD->getParent()); + unsigned FieldNo = RL.getLLVMFieldNo(FD); Elements[FieldNo] = EmitNullConstant(FD->getType()); } @@ -1047,7 +1050,9 @@ CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) { const llvm::StructType *ClassLTy = cast<llvm::StructType>(getTypes().ConvertType(ClassType)); - unsigned FieldNo = getTypes().getLLVMFieldNo(FD); + const CGRecordLayout &RL = + getTypes().getCGRecordLayout(FD->getParent()); + unsigned FieldNo = RL.getLLVMFieldNo(FD); uint64_t Offset = getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo); diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp index 7e26971..42bf68e 100644 --- a/lib/CodeGen/CGExprScalar.cpp +++ b/lib/CodeGen/CGExprScalar.cpp @@ -769,6 +769,9 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { static bool ShouldNullCheckClassCastValue(const CastExpr *CE) { const Expr *E = CE->getSubExpr(); + + if (CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase) + return false; if (isa<CXXThisExpr>(E)) { // We always assume that 'this' is never null. 
@@ -826,6 +829,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { return CGF.GetAddressOfDerivedClass(Src, BaseClassDecl, DerivedClassDecl, NullCheckValue); } + case CastExpr::CK_UncheckedDerivedToBase: case CastExpr::CK_DerivedToBase: { const RecordType *DerivedClassTy = E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>(); @@ -1337,6 +1341,11 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { if (Ops.LHS->getType()->isFPOrFPVectorTy()) return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub"); + + // Signed integer overflow is undefined behavior. + if (Ops.Ty->isSignedIntegerType()) + return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub"); + return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); } diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp index 3ff77f0..9eaf57c 100644 --- a/lib/CodeGen/CGObjC.cpp +++ b/lib/CodeGen/CGObjC.cpp @@ -191,7 +191,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP, // FIXME: We shouldn't need to get the function info here, the // runtime already should have computed it to build the function. RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args, - CC_Default, false), + FunctionType::ExtInfo()), GetPropertyFn, ReturnValueSlot(), Args); // We need to fix the type here. 
Ivars with copy & retain are // always objects so we don't need to worry about complex or @@ -201,7 +201,12 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP, EmitReturnOfRValue(RV, PD->getType()); } else { LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0); - if (hasAggregateLLVMType(Ivar->getType())) { + if (Ivar->getType()->isAnyComplexType()) { + ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(), + LV.isVolatileQualified()); + StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified()); + } + else if (hasAggregateLLVMType(Ivar->getType())) { EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType()); } else { CodeGenTypes &Types = CGM.getTypes(); @@ -280,7 +285,8 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, // FIXME: We shouldn't need to get the function info here, the runtime // already should have computed it to build the function. EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args, - CC_Default, false), SetPropertyFn, + FunctionType::ExtInfo()), + SetPropertyFn, ReturnValueSlot(), Args); } else { // FIXME: Find a clean way to avoid AST node creation. 
@@ -459,12 +465,13 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ static const unsigned NumItems = 16; // Get selector - llvm::SmallVector<IdentifierInfo*, 3> II; - II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState")); - II.push_back(&CGM.getContext().Idents.get("objects")); - II.push_back(&CGM.getContext().Idents.get("count")); - Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(), - &II[0]); + IdentifierInfo *II[] = { + &CGM.getContext().Idents.get("countByEnumeratingWithState"), + &CGM.getContext().Idents.get("objects"), + &CGM.getContext().Idents.get("count") + }; + Selector FastEnumSel = + CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]); QualType ItemsTy = getContext().getConstantArrayType(getContext().getObjCIdType(), @@ -555,7 +562,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ // FIXME: We shouldn't need to get the function info here, the runtime already // should have computed it to build the function. EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2, - CC_Default, false), + FunctionType::ExtInfo()), EnumerationMutationFn, ReturnValueSlot(), Args2); EmitBlock(WasNotMutated); diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp index 119819b..d445200 100644 --- a/lib/CodeGen/CGObjCGNU.cpp +++ b/lib/CodeGen/CGObjCGNU.cpp @@ -465,7 +465,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, CodeGenTypes &Types = CGM.getTypes(); const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs, - CC_Default, false); + FunctionType::ExtInfo()); const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, Method ? 
Method->isVariadic() : false); @@ -573,7 +573,7 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF, CodeGenTypes &Types = CGM.getTypes(); const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs, - CC_Default, false); + FunctionType::ExtInfo()); const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false); @@ -1694,7 +1694,7 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() { Params.push_back(ASTIdTy); const llvm::FunctionType *FTy = Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params, - CC_Default, false), false); + FunctionType::ExtInfo()), false); return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation"); } diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp index 475280b..883ed98 100644 --- a/lib/CodeGen/CGObjCMac.cpp +++ b/lib/CodeGen/CGObjCMac.cpp @@ -13,6 +13,7 @@ #include "CGObjCRuntime.h" +#include "CGRecordLayout.h" #include "CodeGenModule.h" #include "CodeGenFunction.h" #include "clang/AST/ASTContext.h" @@ -306,7 +307,8 @@ public: Params.push_back(Ctx.BoolTy); const llvm::FunctionType *FTy = Types.GetFunctionType(Types.getFunctionInfo(IdType, Params, - CC_Default, false), false); + FunctionType::ExtInfo()), + false); return CGM.CreateRuntimeFunction(FTy, "objc_getProperty"); } @@ -325,7 +327,8 @@ public: Params.push_back(Ctx.BoolTy); const llvm::FunctionType *FTy = Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params, - CC_Default, false), false); + FunctionType::ExtInfo()), + false); return CGM.CreateRuntimeFunction(FTy, "objc_setProperty"); } @@ -337,7 +340,8 @@ public: Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType())); const llvm::FunctionType *FTy = Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params, - CC_Default, false), false); + FunctionType::ExtInfo()), + false); return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation"); } @@ -1559,7 +1563,7 @@ 
CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF, CodeGenTypes &Types = CGM.getTypes(); const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs, - CC_Default, false); + FunctionType::ExtInfo()); const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false); @@ -3131,8 +3135,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI, FieldDecl *Field = RecFields[i]; uint64_t FieldOffset; if (RD) { + const CGRecordLayout &RL = + CGM.getTypes().getCGRecordLayout(Field->getParent()); if (Field->isBitField()) { - CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field); + const CGRecordLayout::BitFieldInfo &Info = RL.getBitFieldInfo(Field); const llvm::Type *Ty = CGM.getTypes().ConvertTypeForMemRecursive(Field->getType()); @@ -3141,7 +3147,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI, FieldOffset = Info.FieldNo * TypeSize; } else FieldOffset = - Layout->getElementOffset(CGM.getTypes().getLLVMFieldNo(Field)); + Layout->getElementOffset(RL.getLLVMFieldNo(Field)); } else FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)); @@ -5094,7 +5100,8 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend( // FIXME. This is too much work to get the ABI-specific result type needed to // find the message name. 
const CGFunctionInfo &FnInfo - = Types.getFunctionInfo(ResultType, CallArgList(), CC_Default, false); + = Types.getFunctionInfo(ResultType, CallArgList(), + FunctionType::ExtInfo()); llvm::Constant *Fn = 0; std::string Name("\01l_"); if (CGM.ReturnTypeUsesSret(FnInfo)) { @@ -5169,7 +5176,7 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend( ObjCTypes.MessageRefCPtrTy)); ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end()); const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs, - CC_Default, false); + FunctionType::ExtInfo()); llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0); Callee = CGF.Builder.CreateLoad(Callee); const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true); diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h index ff5d40b..b781940 100644 --- a/lib/CodeGen/CGObjCRuntime.h +++ b/lib/CodeGen/CGObjCRuntime.h @@ -16,7 +16,6 @@ #ifndef CLANG_CODEGEN_OBCJRUNTIME_H #define CLANG_CODEGEN_OBCJRUNTIME_H #include "clang/Basic/IdentifierTable.h" // Selector -#include "llvm/ADT/SmallVector.h" #include "clang/AST/DeclObjC.h" #include <string> diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp index 4907223..1caec97 100644 --- a/lib/CodeGen/CGRTTI.cpp +++ b/lib/CodeGen/CGRTTI.cpp @@ -148,7 +148,7 @@ public: }; /// BuildTypeInfo - Build the RTTI type info struct for the given type. - llvm::Constant *BuildTypeInfo(QualType Ty); + llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false); }; } @@ -327,83 +327,20 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) { if (ContainsIncompleteClassType(Ty)) return llvm::GlobalValue::InternalLinkage; - switch (Ty->getTypeClass()) { - default: - // FIXME: We need to add code to handle all types. 
- assert(false && "Unhandled type!"); - break; - - case Type::Pointer: { - const PointerType *PointerTy = cast<PointerType>(Ty); - - // If the pointee type has internal linkage, then the pointer type needs to - // have it as well. - if (getTypeInfoLinkage(PointerTy->getPointeeType()) == - llvm::GlobalVariable::InternalLinkage) - return llvm::GlobalVariable::InternalLinkage; - - return llvm::GlobalVariable::WeakODRLinkage; - } - - case Type::Enum: { - const EnumType *EnumTy = cast<EnumType>(Ty); - const EnumDecl *ED = EnumTy->getDecl(); - - // If we're in an anonymous namespace, then we always want internal linkage. - if (ED->isInAnonymousNamespace() || !ED->hasLinkage()) - return llvm::GlobalVariable::InternalLinkage; - - return llvm::GlobalValue::WeakODRLinkage; - } - - case Type::Record: { - const RecordType *RecordTy = cast<RecordType>(Ty); - const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); - - // If we're in an anonymous namespace, then we always want internal linkage. - if (RD->isInAnonymousNamespace() || !RD->hasLinkage()) - return llvm::GlobalVariable::InternalLinkage; - - // If this class does not have a vtable, we want weak linkage. - if (!RD->isDynamicClass()) - return llvm::GlobalValue::WeakODRLinkage; - - return CodeGenModule::getVtableLinkage(RD); - } - - case Type::Vector: - case Type::ExtVector: - case Type::Builtin: - return llvm::GlobalValue::WeakODRLinkage; - - case Type::FunctionProto: { - const FunctionProtoType *FPT = cast<FunctionProtoType>(Ty); + switch (Ty->getLinkage()) { + case NoLinkage: + case InternalLinkage: + case UniqueExternalLinkage: + return llvm::GlobalValue::InternalLinkage; - // Check the return type. - if (getTypeInfoLinkage(FPT->getResultType()) == - llvm::GlobalValue::InternalLinkage) - return llvm::GlobalValue::InternalLinkage; - - // Check the parameter types. 
- for (unsigned i = 0; i != FPT->getNumArgs(); ++i) { - if (getTypeInfoLinkage(FPT->getArgType(i)) == - llvm::GlobalValue::InternalLinkage) - return llvm::GlobalValue::InternalLinkage; + case ExternalLinkage: + if (const RecordType *Record = dyn_cast<RecordType>(Ty)) { + const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()); + if (RD->isDynamicClass()) + return CodeGenModule::getVtableLinkage(RD); } - - return llvm::GlobalValue::WeakODRLinkage; - } - - case Type::ConstantArray: - case Type::IncompleteArray: { - const ArrayType *AT = cast<ArrayType>(Ty); - - // Check the element type. - if (getTypeInfoLinkage(AT->getElementType()) == - llvm::GlobalValue::InternalLinkage) - return llvm::GlobalValue::InternalLinkage; - } + return llvm::GlobalValue::WeakODRLinkage; } return llvm::GlobalValue::WeakODRLinkage; @@ -444,6 +381,7 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) { switch (Ty->getTypeClass()) { default: assert(0 && "Unhandled type!"); + case Type::Builtin: // GCC treats vector types as fundamental types. case Type::Vector: case Type::ExtVector: @@ -511,7 +449,7 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) { Fields.push_back(Vtable); } -llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) { +llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) { // We want to operate on the canonical type. Ty = CGM.getContext().getCanonicalType(Ty); @@ -525,7 +463,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) { return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy); // Check if there is already an external RTTI descriptor for this type. 
- if (ShouldUseExternalRTTIDescriptor(Ty)) + if (!Force && ShouldUseExternalRTTIDescriptor(Ty)) return GetAddrOfExternalRTTIDescriptor(Ty); llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(Ty); @@ -538,11 +476,9 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) { switch (Ty->getTypeClass()) { default: assert(false && "Unhandled type class!"); - case Type::Builtin: - assert(false && "Builtin type info must be in the standard library!"); - break; // GCC treats vector types as fundamental types. + case Type::Builtin: case Type::Vector: case Type::ExtVector: // Itanium C++ ABI 2.9.5p4: @@ -760,7 +696,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { // subobject. For a virtual base, this is the offset in the virtual table of // the virtual base offset for the virtual base referenced (negative). if (Base->isVirtual()) - OffsetFlags = CGM.getVtableInfo().getVirtualBaseOffsetOffset(RD, BaseDecl); + OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl); else { const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8; @@ -854,3 +790,61 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty) { return RTTIBuilder(*this).BuildTypeInfo(Ty); } + +// Try to find the magic class __cxxabiv1::__fundamental_type_info. If +// exists and has a destructor, we will emit the typeinfo for the fundamental +// types. This is the same behaviour as GCC. 
+static CXXRecordDecl *FindMagicClass(ASTContext &AC) { + const IdentifierInfo &NamespaceII = AC.Idents.get("__cxxabiv1"); + DeclarationName NamespaceDN = AC.DeclarationNames.getIdentifier(&NamespaceII); + TranslationUnitDecl *TUD = AC.getTranslationUnitDecl(); + DeclContext::lookup_result NamespaceLookup = TUD->lookup(NamespaceDN); + if (NamespaceLookup.first == NamespaceLookup.second) + return NULL; + const NamespaceDecl *Namespace = + dyn_cast<NamespaceDecl>(*NamespaceLookup.first); + if (!Namespace) + return NULL; + + const IdentifierInfo &ClassII = AC.Idents.get("__fundamental_type_info"); + DeclarationName ClassDN = AC.DeclarationNames.getIdentifier(&ClassII); + DeclContext::lookup_const_result ClassLookup = Namespace->lookup(ClassDN); + if (ClassLookup.first == ClassLookup.second) + return NULL; + CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(*ClassLookup.first); + + if (Class->hasDefinition() && Class->isDynamicClass() && + Class->getDestructor(AC)) + return Class; + + return NULL; +} + +void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) { + QualType PointerType = Context.getPointerType(Type); + QualType PointerTypeConst = Context.getPointerType(Type.withConst()); + RTTIBuilder(*this).BuildTypeInfo(Type, true); + RTTIBuilder(*this).BuildTypeInfo(PointerType, true); + RTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true); +} + +void CodeGenModule::EmitFundamentalRTTIDescriptors() { + CXXRecordDecl *RD = FindMagicClass(getContext()); + if (!RD) + return; + + getVTables().GenerateClassData(getVtableLinkage(RD), RD); + + QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty, + Context.Char16Ty, Context.UnsignedLongLongTy, + Context.LongLongTy, Context.WCharTy, + Context.UnsignedShortTy, Context.ShortTy, + Context.UnsignedLongTy, Context.LongTy, + Context.UnsignedIntTy, Context.IntTy, + Context.UnsignedCharTy, Context.FloatTy, + Context.LongDoubleTy, Context.DoubleTy, + Context.CharTy, Context.BoolTy, + Context.SignedCharTy }; + for 
(unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i) + EmitFundamentalRTTIDescriptor(FundamentalTypes[i]); +} diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h new file mode 100644 index 0000000..d0d8f98 --- /dev/null +++ b/lib/CodeGen/CGRecordLayout.h @@ -0,0 +1,95 @@ +//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H +#define CLANG_CODEGEN_CGRECORDLAYOUT_H + +#include "llvm/ADT/DenseMap.h" +#include "clang/AST/Decl.h" +namespace llvm { + class Type; +} + +namespace clang { +namespace CodeGen { + +/// CGRecordLayout - This class handles struct and union layout info while +/// lowering AST types to LLVM types. +/// +/// These layout objects are only created on demand as IR generation requires. +class CGRecordLayout { + friend class CodeGenTypes; + + CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT + void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT + +public: + struct BitFieldInfo { + BitFieldInfo(unsigned FieldNo, + unsigned Start, + unsigned Size) + : FieldNo(FieldNo), Start(Start), Size(Size) {} + + unsigned FieldNo; + unsigned Start; + unsigned Size; + }; + +private: + /// The LLVMType corresponding to this record layout. + const llvm::Type *LLVMType; + + /// Map from (non-bit-field) struct field to the corresponding llvm struct + /// type field no. This info is populated by record builder. + llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo; + + /// Map from (bit-field) struct field to the corresponding llvm struct type + /// field no. This info is populated by record builder. 
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields; + + /// Whether one of the fields in this record layout is a pointer to data + /// member, or a struct that contains pointer to data member. + bool ContainsPointerToDataMember : 1; + +public: + CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember) + : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) {} + + /// \brief Return the LLVM type associated with this record. + const llvm::Type *getLLVMType() const { + return LLVMType; + } + + /// \brief Check whether this struct contains pointers to data members. + bool containsPointerToDataMember() const { + return ContainsPointerToDataMember; + } + + /// \brief Return the BitFieldInfo that corresponds to the field FD. + unsigned getLLVMFieldNo(const FieldDecl *FD) const { + assert(!FD->isBitField() && "Invalid call for bit-field decl!"); + assert(FieldInfo.count(FD) && "Invalid field for record!"); + return FieldInfo.lookup(FD); + } + + /// \brief Return llvm::StructType element number that corresponds to the + /// field FD. 
+ const BitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const { + assert(FD->isBitField() && "Invalid call for non bit-field decl!"); + llvm::DenseMap<const FieldDecl *, BitFieldInfo>::const_iterator + it = BitFields.find(FD); + assert(it != BitFields.end() && "Unable to find bitfield info"); + return it->second; + } +}; + +} // end namespace CodeGen +} // end namespace clang + +#endif diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp index baafd68..daebabd 100644 --- a/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -1,4 +1,4 @@ -//===--- CGRecordLayoutBuilder.cpp - Record builder helper ------*- C++ -*-===// +//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -7,25 +7,131 @@ // //===----------------------------------------------------------------------===// // -// This is a helper class used to build CGRecordLayout objects and LLVM types. +// Builder implementation for CGRecordLayout objects. // //===----------------------------------------------------------------------===// -#include "CGRecordLayoutBuilder.h" - +#include "CGRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/Expr.h" #include "clang/AST/RecordLayout.h" #include "CodeGenTypes.h" +#include "llvm/Type.h" #include "llvm/DerivedTypes.h" #include "llvm/Target/TargetData.h" - - using namespace clang; using namespace CodeGen; +namespace clang { +namespace CodeGen { + +class CGRecordLayoutBuilder { +public: + /// FieldTypes - Holds the LLVM types that the struct is created from. + std::vector<const llvm::Type *> FieldTypes; + + /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number. 
+ typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo; + llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields; + + /// LLVMBitFieldInfo - Holds location and size information about a bit field. + struct LLVMBitFieldInfo { + LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start, + unsigned Size) + : FD(FD), FieldNo(FieldNo), Start(Start), Size(Size) { } + + const FieldDecl *FD; + + unsigned FieldNo; + unsigned Start; + unsigned Size; + }; + llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields; + + /// ContainsPointerToDataMember - Whether one of the fields in this record + /// layout is a pointer to data member, or a struct that contains pointer to + /// data member. + bool ContainsPointerToDataMember; + + /// Packed - Whether the resulting LLVM struct will be packed or not. + bool Packed; + +private: + CodeGenTypes &Types; + + /// Alignment - Contains the alignment of the RecordDecl. + // + // FIXME: This is not needed and should be removed. + unsigned Alignment; + + /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the + /// LLVM types. + unsigned AlignmentAsLLVMStruct; + + /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field, + /// this will have the number of bits still available in the field. + char BitsAvailableInLastField; + + /// NextFieldOffsetInBytes - Holds the next field offset in bytes. + uint64_t NextFieldOffsetInBytes; + + /// LayoutUnion - Will layout a union RecordDecl. + void LayoutUnion(const RecordDecl *D); + + /// LayoutField - try to layout all fields in the record decl. + /// Returns false if the operation failed because the struct is not packed. + bool LayoutFields(const RecordDecl *D); + + /// LayoutBases - layout the bases and vtable pointer of a record decl. + void LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout); + + /// LayoutField - layout a single field. Returns false if the operation failed + /// because the current struct is not packed. 
+ bool LayoutField(const FieldDecl *D, uint64_t FieldOffset); + + /// LayoutBitField - layout a single bit field. + void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset); + + /// AppendField - Appends a field with the given offset and type. + void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy); + + /// AppendPadding - Appends enough padding bytes so that the total struct + /// size matches the alignment of the passed in type. + void AppendPadding(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy); + + /// AppendPadding - Appends enough padding bytes so that the total + /// struct size is a multiple of the field alignment. + void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment); + + /// AppendBytes - Append a given number of bytes to the record. + void AppendBytes(uint64_t NumBytes); + + /// AppendTailPadding - Append enough tail padding so that the type will have + /// the passed size. + void AppendTailPadding(uint64_t RecordSize); + + unsigned getTypeAlignment(const llvm::Type *Ty) const; + uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const; + + /// CheckForPointerToDataMember - Check if the given type contains a pointer + /// to data member. + void CheckForPointerToDataMember(QualType T); + +public: + CGRecordLayoutBuilder(CodeGenTypes &Types) + : ContainsPointerToDataMember(false), Packed(false), Types(Types), + Alignment(0), AlignmentAsLLVMStruct(1), + BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { } + + /// Layout - Will layout a RecordDecl. + void Layout(const RecordDecl *D); +}; + +} +} + void CGRecordLayoutBuilder::Layout(const RecordDecl *D) { Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8; Packed = D->hasAttr<PackedAttr>(); @@ -110,7 +216,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D, // Check if we have a pointer to data member in this field. 
CheckForPointerToDataMember(D->getType()); - + assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!"); uint64_t FieldOffsetInBytes = FieldOffset / 8; @@ -166,7 +272,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) { unsigned Align = 0; bool HasOnlyZeroSizedBitFields = true; - + unsigned FieldNo = 0; for (RecordDecl::field_iterator Field = D->field_begin(), FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) { @@ -182,12 +288,13 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) { continue; // Add the bit field info. - Types.addBitFieldInfo(*Field, 0, 0, FieldSize); - } else - Types.addFieldInfo(*Field, 0); + LLVMBitFields.push_back(LLVMBitFieldInfo(*Field, 0, 0, FieldSize)); + } else { + LLVMFields.push_back(LLVMFieldInfo(*Field, 0)); + } HasOnlyZeroSizedBitFields = false; - + const llvm::Type *FieldTy = Types.ConvertTypeForMemRecursive(Field->getType()); unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy); @@ -218,7 +325,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) { "0-align record did not have all zero-sized bit-fields!"); Align = 1; } - + // Append tail padding. if (Layout.getSize() / 8 > Size) AppendPadding(Layout.getSize() / 8, Align); @@ -228,9 +335,9 @@ void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout) { // Check if we need to add a vtable pointer. 
if (RD->isDynamicClass() && !Layout.getPrimaryBase()) { - const llvm::Type *Int8PtrTy = + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(Types.getLLVMContext()); - + assert(NextFieldOffsetInBytes == 0 && "Vtable pointer must come first!"); AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo()); @@ -245,7 +352,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) { if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) LayoutBases(RD, Layout); - + unsigned FieldNo = 0; for (RecordDecl::field_iterator Field = D->field_begin(), @@ -269,14 +376,14 @@ void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) { uint64_t RecordSizeInBytes = RecordSize / 8; assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!"); - uint64_t AlignedNextFieldOffset = + uint64_t AlignedNextFieldOffset = llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct); if (AlignedNextFieldOffset == RecordSizeInBytes) { // We don't need any padding. return; } - + unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes; AppendBytes(NumPadBytes); } @@ -359,46 +466,49 @@ void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) { } } else if (const RecordType *RT = T->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); - + // FIXME: It would be better if there was a way to explicitly compute the // record layout instead of converting to a type. 
Types.ConvertTagDeclType(RD); - + const CGRecordLayout &Layout = Types.getCGRecordLayout(RD); - + if (Layout.containsPointerToDataMember()) ContainsPointerToDataMember = true; - } + } } -CGRecordLayout * -CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types, - const RecordDecl *D) { - CGRecordLayoutBuilder Builder(Types); +CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) { + CGRecordLayoutBuilder Builder(*this); Builder.Layout(D); - const llvm::Type *Ty = llvm::StructType::get(Types.getLLVMContext(), + const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(), Builder.FieldTypes, Builder.Packed); - assert(Types.getContext().getASTRecordLayout(D).getSize() / 8 == - Types.getTargetData().getTypeAllocSize(Ty) && + assert(getContext().getASTRecordLayout(D).getSize() / 8 == + getTargetData().getTypeAllocSize(Ty) && "Type size mismatch!"); + CGRecordLayout *RL = + new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember); + // Add all the field numbers. for (unsigned i = 0, e = Builder.LLVMFields.size(); i != e; ++i) { const FieldDecl *FD = Builder.LLVMFields[i].first; unsigned FieldNo = Builder.LLVMFields[i].second; - Types.addFieldInfo(FD, FieldNo); + RL->FieldInfo.insert(std::make_pair(FD, FieldNo)); } // Add bitfield info. 
for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i) { - const LLVMBitFieldInfo &Info = Builder.LLVMBitFields[i]; + const CGRecordLayoutBuilder::LLVMBitFieldInfo &Info = + Builder.LLVMBitFields[i]; - Types.addBitFieldInfo(Info.FD, Info.FieldNo, Info.Start, Info.Size); + CGRecordLayout::BitFieldInfo BFI(Info.FieldNo, Info.Start, Info.Size); + RL->BitFields.insert(std::make_pair(Info.FD, BFI)); } - return new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember); + return RL; } diff --git a/lib/CodeGen/CGRecordLayoutBuilder.h b/lib/CodeGen/CGRecordLayoutBuilder.h deleted file mode 100644 index eb60ed7..0000000 --- a/lib/CodeGen/CGRecordLayoutBuilder.h +++ /dev/null @@ -1,142 +0,0 @@ -//===--- CGRecordLayoutBuilder.h - Record builder helper --------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This is a helper class used to build CGRecordLayout objects and LLVM types. -// -//===----------------------------------------------------------------------===// - -#ifndef CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H -#define CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H - -#include "llvm/ADT/SmallVector.h" -#include "llvm/System/DataTypes.h" -#include <vector> - -namespace llvm { - class Type; -} - -namespace clang { - class ASTRecordLayout; - class CXXRecordDecl; - class FieldDecl; - class RecordDecl; - class QualType; - -namespace CodeGen { - class CGRecordLayout; - class CodeGenTypes; - -class CGRecordLayoutBuilder { - CodeGenTypes &Types; - - /// Packed - Whether the resulting LLVM struct will be packed or not. - bool Packed; - - /// ContainsPointerToDataMember - Whether one of the fields in this record - /// layout is a pointer to data member, or a struct that contains pointer to - /// data member. 
- bool ContainsPointerToDataMember; - - /// Alignment - Contains the alignment of the RecordDecl. - unsigned Alignment; - - /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the - /// LLVM types. - unsigned AlignmentAsLLVMStruct; - - /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field, - /// this will have the number of bits still available in the field. - char BitsAvailableInLastField; - - /// NextFieldOffsetInBytes - Holds the next field offset in bytes. - uint64_t NextFieldOffsetInBytes; - - /// FieldTypes - Holds the LLVM types that the struct is created from. - std::vector<const llvm::Type *> FieldTypes; - - /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number. - typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo; - llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields; - - /// LLVMBitFieldInfo - Holds location and size information about a bit field. - struct LLVMBitFieldInfo { - LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start, - unsigned Size) - : FD(FD), FieldNo(FieldNo), Start(Start), Size(Size) { } - - const FieldDecl *FD; - - unsigned FieldNo; - unsigned Start; - unsigned Size; - }; - llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields; - - CGRecordLayoutBuilder(CodeGenTypes &Types) - : Types(Types), Packed(false), ContainsPointerToDataMember(false) - , Alignment(0), AlignmentAsLLVMStruct(1) - , BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { } - - /// Layout - Will layout a RecordDecl. - void Layout(const RecordDecl *D); - - /// LayoutUnion - Will layout a union RecordDecl. - void LayoutUnion(const RecordDecl *D); - - /// LayoutField - try to layout all fields in the record decl. - /// Returns false if the operation failed because the struct is not packed. - bool LayoutFields(const RecordDecl *D); - - /// LayoutBases - layout the bases and vtable pointer of a record decl. 
- void LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout); - - /// LayoutField - layout a single field. Returns false if the operation failed - /// because the current struct is not packed. - bool LayoutField(const FieldDecl *D, uint64_t FieldOffset); - - /// LayoutBitField - layout a single bit field. - void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset); - - /// AppendField - Appends a field with the given offset and type. - void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy); - - /// AppendPadding - Appends enough padding bytes so that the total struct - /// size matches the alignment of the passed in type. - void AppendPadding(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy); - - /// AppendPadding - Appends enough padding bytes so that the total - /// struct size is a multiple of the field alignment. - void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment); - - /// AppendBytes - Append a given number of bytes to the record. - void AppendBytes(uint64_t NumBytes); - - /// AppendTailPadding - Append enough tail padding so that the type will have - /// the passed size. - void AppendTailPadding(uint64_t RecordSize); - - unsigned getTypeAlignment(const llvm::Type *Ty) const; - uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const; - - /// CheckForPointerToDataMember - Check if the given type contains a pointer - /// to data member. - void CheckForPointerToDataMember(QualType T); - -public: - /// ComputeLayout - Return the right record layout for a given record decl. 
- static CGRecordLayout *ComputeLayout(CodeGenTypes &Types, - const RecordDecl *D); -}; - -} // end namespace CodeGen -} // end namespace clang - - -#endif diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp index a889e55..ae2f791 100644 --- a/lib/CodeGen/CGStmt.cpp +++ b/lib/CodeGen/CGStmt.cpp @@ -607,7 +607,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) { } else if (FnRetTy->isReferenceType()) { // If this function returns a reference, take the address of the expression // rather than the value. - Builder.CreateStore(EmitLValue(RV).getAddress(), ReturnValue); + RValue Result = EmitReferenceBindingToExpr(RV, false); + Builder.CreateStore(Result.getScalarVal(), ReturnValue); } else if (!hasAggregateLLVMType(RV->getType())) { Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); } else if (RV->getType()->isAnyComplexType()) { diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp index bed8439..6d38ab9 100644 --- a/lib/CodeGen/CGTemporaries.cpp +++ b/lib/CodeGen/CGTemporaries.cpp @@ -127,15 +127,14 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E, size_t CleanupStackDepth = CleanupEntries.size(); (void) CleanupStackDepth; - unsigned OldNumLiveTemporaries = LiveTemporaries.size(); - - RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile, - /*IgnoreResult=*/false, IsInitializer); - - // Pop temporaries. 
- while (LiveTemporaries.size() > OldNumLiveTemporaries) - PopCXXTemporary(); + RValue RV; + + { + CXXTemporariesCleanupScope Scope(*this); + RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile, + /*IgnoreResult=*/false, IsInitializer); + } assert(CleanupEntries.size() == CleanupStackDepth && "Cleanup size mismatch!"); diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp index 96c104b..91d9f76 100644 --- a/lib/CodeGen/CGVTT.cpp +++ b/lib/CodeGen/CGVTT.cpp @@ -19,283 +19,355 @@ using namespace CodeGen; #define D1(x) namespace { + +/// VTT builder - Class for building VTT layout information. class VTTBuilder { - /// Inits - The list of values built for the VTT. - std::vector<llvm::Constant *> &Inits; - /// Class - The most derived class that this vtable is being built for. - const CXXRecordDecl *Class; - CodeGenModule &CGM; // Per-module state. - llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase; - /// BLayout - Layout for the most derived class that this vtable is being - /// built for. - const ASTRecordLayout &BLayout; - CGVtableInfo::AddrMap_t &AddressPoints; - // vtbl - A pointer to the vtable for Class. - llvm::Constant *ClassVtbl; - llvm::LLVMContext &VMContext; - - /// SeenVBasesInSecondary - The seen virtual bases when building the - /// secondary virtual pointers. - llvm::SmallPtrSet<const CXXRecordDecl *, 32> SeenVBasesInSecondary; + + CodeGenModule &CGM; - llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies; + /// MostDerivedClass - The most derived class for which we're building this + /// vtable. + const CXXRecordDecl *MostDerivedClass; + + typedef llvm::SmallVector<llvm::Constant *, 64> VTTComponentsVectorTy; - bool GenerateDefinition; + /// VTTComponents - The VTT components. + VTTComponentsVectorTy VTTComponents; + + /// MostDerivedClassLayout - the AST record layout of the most derived class. 
+ const ASTRecordLayout &MostDerivedClassLayout; + + typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy; - llvm::DenseMap<BaseSubobject, llvm::Constant *> CtorVtables; - llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t> - CtorVtableAddressPoints; + typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy; + + /// SubVTTIndicies - The sub-VTT indices for the bases of the most derived + /// class. + llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies; + + /// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of + /// all subobjects of the most derived class. + llvm::DenseMap<BaseSubobject, uint64_t> SecondaryVirtualPointerIndices; + + /// GenerateDefinition - Whether the VTT builder should generate LLVM IR for + /// the VTT. + bool GenerateDefinition; - llvm::Constant *getCtorVtable(const BaseSubobject &Base, - bool BaseIsVirtual) { - if (!GenerateDefinition) - return 0; - - llvm::Constant *&CtorVtable = CtorVtables[Base]; - if (!CtorVtable) { - // Build the vtable. - CGVtableInfo::CtorVtableInfo Info - = CGM.getVtableInfo().getCtorVtable(Class, Base, BaseIsVirtual); - - CtorVtable = Info.Vtable; - - // Add the address points for this base. - for (CGVtableInfo::AddressPointsMapTy::const_iterator I = - Info.AddressPoints.begin(), E = Info.AddressPoints.end(); - I != E; ++I) { - uint64_t &AddressPoint = - CtorVtableAddressPoints[std::make_pair(Base.getBase(), I->first)]; - - // Check if we already have the address points for this base. - if (AddressPoint) - break; - - // Otherwise, insert it. - AddressPoint = I->second; - } - } - - return CtorVtable; + /// GetAddrOfVTable - Returns the address of the vtable for the base class in + /// the given vtable class. + /// + /// \param AddressPoints - If the returned vtable is a construction vtable, + /// this will hold the address points for it. 
+ llvm::Constant *GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual, + AddressPointsMapTy& AddressPoints); + + /// AddVTablePointer - Add a vtable pointer to the VTT currently being built. + /// + /// \param AddressPoints - If the vtable is a construction vtable, this has + /// the address points for it. + void AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable, + const CXXRecordDecl *VTableClass, + const AddressPointsMapTy& AddressPoints); + + /// LayoutSecondaryVTTs - Lay out the secondary VTTs of the given base + /// subobject. + void LayoutSecondaryVTTs(BaseSubobject Base); + + /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers + /// for the given base subobject. + /// + /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base + /// or a direct or indirect base of a virtual base. + /// + /// \param AddressPoints - If the vtable is a construction vtable, this has + /// the address points for it. + void LayoutSecondaryVirtualPointers(BaseSubobject Base, + bool BaseIsMorallyVirtual, + llvm::Constant *VTable, + const CXXRecordDecl *VTableClass, + const AddressPointsMapTy& AddressPoints, + VisitedVirtualBasesSetTy &VBases); + + /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers + /// for the given base subobject. + /// + /// \param AddressPoints - If the vtable is a construction vtable, this has + /// the address points for it. + void LayoutSecondaryVirtualPointers(BaseSubobject Base, + llvm::Constant *VTable, + const AddressPointsMapTy& AddressPoints); + + /// LayoutVirtualVTTs - Lay out the VTTs for the virtual base classes of the + /// given record decl. + void LayoutVirtualVTTs(const CXXRecordDecl *RD, + VisitedVirtualBasesSetTy &VBases); + + /// LayoutVTT - Will lay out the VTT for the given subobject, including any + /// secondary VTTs, secondary virtual pointers and virtual VTTs. 
+ void LayoutVTT(BaseSubobject Base, bool BaseIsVirtual); + +public: + VTTBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass, + bool GenerateDefinition); + + // getVTTComponents - Returns a reference to the VTT components. + const VTTComponentsVectorTy &getVTTComponents() const { + return VTTComponents; } + /// getSubVTTIndicies - Returns a reference to the sub-VTT indices. + const llvm::DenseMap<const CXXRecordDecl *, uint64_t> & + getSubVTTIndicies() const { + return SubVTTIndicies; + } - /// BuildVtablePtr - Build up a referene to the given secondary vtable - llvm::Constant *BuildVtablePtr(llvm::Constant *Vtable, - const CXXRecordDecl *VtableClass, - const CXXRecordDecl *RD, - uint64_t Offset) { - if (!GenerateDefinition) - return 0; - - uint64_t AddressPoint; - - if (VtableClass != Class) { - // We have a ctor vtable, look for the address point in the ctor vtable - // address points. - AddressPoint = - CtorVtableAddressPoints[std::make_pair(VtableClass, - BaseSubobject(RD, Offset))]; - } else { - AddressPoint = - (*AddressPoints[VtableClass])[std::make_pair(RD, Offset)]; - } + /// getSecondaryVirtualPointerIndices - Returns a reference to the secondary + /// virtual pointer indices. + const llvm::DenseMap<BaseSubobject, uint64_t> & + getSecondaryVirtualPointerIndices() const { + return SecondaryVirtualPointerIndices; + } - // FIXME: We can never have 0 address point. Do this for now so gepping - // retains the same structure. Later we'll just assert. 
- if (AddressPoint == 0) - AddressPoint = 1; - D1(printf("XXX address point for %s in %s layout %s at offset %d was %d\n", - RD->getNameAsCString(), VtblClass->getNameAsCString(), - Class->getNameAsCString(), (int)Offset, (int)AddressPoint)); - - llvm::Value *Idxs[] = { - llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), 0), - llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), AddressPoint) - }; +}; + +VTTBuilder::VTTBuilder(CodeGenModule &CGM, + const CXXRecordDecl *MostDerivedClass, + bool GenerateDefinition) + : CGM(CGM), MostDerivedClass(MostDerivedClass), + MostDerivedClassLayout(CGM.getContext().getASTRecordLayout(MostDerivedClass)), + GenerateDefinition(GenerateDefinition) { - llvm::Constant *Init = - llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, Idxs, 2); + // Lay out this VTT. + LayoutVTT(BaseSubobject(MostDerivedClass, 0), /*BaseIsVirtual=*/false); +} - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext); - return llvm::ConstantExpr::getBitCast(Init, Int8PtrTy); +llvm::Constant * +VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual, + AddressPointsMapTy& AddressPoints) { + if (!GenerateDefinition) + return 0; + + if (Base.getBase() == MostDerivedClass) { + assert(Base.getBaseOffset() == 0 && + "Most derived class vtable must have a zero offset!"); + // This is a regular vtable. + return CGM.getVTables().GetAddrOfVTable(MostDerivedClass); } + + return CGM.getVTables().GenerateConstructionVTable(MostDerivedClass, + Base, BaseIsVirtual, + AddressPoints); +} - /// Secondary - Add the secondary vtable pointers to Inits. Offset is the - /// current offset in bits to the object we're working on. - void Secondary(const CXXRecordDecl *RD, llvm::Constant *vtbl, - const CXXRecordDecl *VtblClass, uint64_t Offset=0, - bool MorallyVirtual=false) { - if (RD->getNumVBases() == 0 && ! 
MorallyVirtual) - return; - - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - - // We only want to visit each virtual base once. - if (i->isVirtual() && SeenVBasesInSecondary.count(Base)) - continue; - - // Itanium C++ ABI 2.6.2: - // Secondary virtual pointers are present for all bases with either - // virtual bases or virtual function declarations overridden along a - // virtual path. - // - // If the base class is not dynamic, we don't want to add it, nor any - // of its base classes. - if (!Base->isDynamicClass()) - continue; +void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable, + const CXXRecordDecl *VTableClass, + const AddressPointsMapTy& AddressPoints) { + // Store the vtable pointer index if we're generating the primary VTT. + if (VTableClass == MostDerivedClass) { + assert(!SecondaryVirtualPointerIndices.count(Base) && + "A virtual pointer index already exists for this base subobject!"); + SecondaryVirtualPointerIndices[Base] = VTTComponents.size(); + } - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual(); - bool NonVirtualPrimaryBase; - NonVirtualPrimaryBase = !PrimaryBaseWasVirtual && Base == PrimaryBase; - bool BaseMorallyVirtual = MorallyVirtual | i->isVirtual(); - uint64_t BaseOffset; - if (!i->isVirtual()) { - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - BaseOffset = Offset + Layout.getBaseClassOffset(Base); - } else - BaseOffset = BLayout.getVBaseClassOffset(Base); - llvm::Constant *subvtbl = vtbl; - const CXXRecordDecl *subVtblClass = VtblClass; - if ((Base->getNumVBases() || BaseMorallyVirtual) - && !NonVirtualPrimaryBase) { - llvm::Constant *init; - if (BaseMorallyVirtual || 
VtblClass == Class) - init = BuildVtablePtr(vtbl, VtblClass, Base, BaseOffset); - else { - init = getCtorVtable(BaseSubobject(Base, BaseOffset), i->isVirtual()); - - subvtbl = init; - subVtblClass = Base; - - init = BuildVtablePtr(init, Class, Base, BaseOffset); - } - - Inits.push_back(init); - } - - if (i->isVirtual()) - SeenVBasesInSecondary.insert(Base); - - Secondary(Base, subvtbl, subVtblClass, BaseOffset, BaseMorallyVirtual); - } + if (!GenerateDefinition) { + VTTComponents.push_back(0); + return; } - /// BuiltVTT - Add the VTT to Inits. Offset is the offset in bits to the - /// currnet object we're working on. - void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool BaseIsVirtual, - bool MorallyVirtual) { - // Itanium C++ ABI 2.6.2: - // An array of virtual table addresses, called the VTT, is declared for - // each class type that has indirect or direct virtual base classes. - if (RD->getNumVBases() == 0) - return; + uint64_t AddressPoint; + if (VTableClass != MostDerivedClass) { + // The vtable is a construction vtable, look in the construction vtable + // address points. + AddressPoint = AddressPoints.lookup(Base); + } else { + // Just get the address point for the regular vtable. + AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass); + } - // Remember the sub-VTT index. 
- SubVTTIndicies[RD] = Inits.size(); + if (!AddressPoint) AddressPoint = 0; + assert(AddressPoint != 0 && "Did not find an address point!"); + + llvm::Value *Idxs[] = { + llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0), + llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), + AddressPoint) + }; + + llvm::Constant *Init = + llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs, 2); + + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); + Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy); + + VTTComponents.push_back(Init); +} - llvm::Constant *Vtable; - const CXXRecordDecl *VtableClass; +void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) { + const CXXRecordDecl *RD = Base.getBase(); - // First comes the primary virtual table pointer... - if (MorallyVirtual) { - Vtable = ClassVtbl; - VtableClass = Class; - } else { - Vtable = getCtorVtable(BaseSubobject(RD, Offset), - /*IsVirtual=*/BaseIsVirtual); - VtableClass = RD; - } + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { - llvm::Constant *Init = BuildVtablePtr(Vtable, VtableClass, RD, Offset); - Inits.push_back(Init); - - // then the secondary VTTs.... - SecondaryVTTs(RD, Offset, MorallyVirtual); + // Don't layout virtual bases. + if (I->isVirtual()) + continue; - // Make sure to clear the set of seen virtual bases. - SeenVBasesInSecondary.clear(); + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - // and last the secondary vtable pointers. - Secondary(RD, Vtable, VtableClass, Offset, MorallyVirtual); + const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); + uint64_t BaseOffset = Base.getBaseOffset() + + Layout.getBaseClassOffset(BaseDecl); + + // Layout the VTT for this base. 
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false); } +} + +void +VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base, + bool BaseIsMorallyVirtual, + llvm::Constant *VTable, + const CXXRecordDecl *VTableClass, + const AddressPointsMapTy& AddressPoints, + VisitedVirtualBasesSetTy &VBases) { + const CXXRecordDecl *RD = Base.getBase(); + + // We're not interested in bases that don't have virtual bases, and not + // morally virtual bases. + if (!RD->getNumVBases() && !BaseIsMorallyVirtual) + return; - /// SecondaryVTTs - Add the secondary VTTs to Inits. The secondary VTTs are - /// built from each direct non-virtual proper base that requires a VTT in - /// declaration order. - void SecondaryVTTs(const CXXRecordDecl *RD, uint64_t Offset=0, - bool MorallyVirtual=false) { - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - if (i->isVirtual()) + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + // Itanium C++ ABI 2.6.2: + // Secondary virtual pointers are present for all bases with either + // virtual bases or virtual function declarations overridden along a + // virtual path. + // + // If the base class is not dynamic, we don't want to add it, nor any + // of its base classes. + if (!BaseDecl->isDynamicClass()) + continue; + + bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual; + bool BaseDeclIsNonVirtualPrimaryBase = false; + uint64_t BaseOffset; + if (I->isVirtual()) { + // Ignore virtual bases that we've already visited. 
+ if (!VBases.insert(BaseDecl)) continue; + + BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl); + BaseDeclIsMorallyVirtual = true; + } else { const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base); - BuildVTT(Base, BaseOffset, /*BaseIsVirtual=*/false, MorallyVirtual); + BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); + + if (!Layout.getPrimaryBaseWasVirtual() && + Layout.getPrimaryBase() == BaseDecl) + BaseDeclIsNonVirtualPrimaryBase = true; } - } - /// VirtualVTTs - Add the VTT for each proper virtual base in inheritance - /// graph preorder. - void VirtualVTTs(const CXXRecordDecl *RD) { - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - if (i->isVirtual() && !SeenVBase.count(Base)) { - SeenVBase.insert(Base); - uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base); - BuildVTT(Base, BaseOffset, /*BaseIsVirtual=*/true, false); - } - VirtualVTTs(Base); + // Itanium C++ ABI 2.6.2: + // Secondary virtual pointers: for each base class X which (a) has virtual + // bases or is reachable along a virtual path from D, and (b) is not a + // non-virtual primary base, the address of the virtual table for X-in-D + // or an appropriate construction virtual table. + if (!BaseDeclIsNonVirtualPrimaryBase && + (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) { + // Add the vtable pointer. + AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable, VTableClass, + AddressPoints); } + + // And lay out the secondary virtual pointers for the base class. 
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset), + BaseDeclIsMorallyVirtual, VTable, + VTableClass, AddressPoints, VBases); } +} -public: - VTTBuilder(std::vector<llvm::Constant *> &inits, const CXXRecordDecl *c, - CodeGenModule &cgm, bool GenerateDefinition) - : Inits(inits), Class(c), CGM(cgm), - BLayout(cgm.getContext().getASTRecordLayout(c)), - AddressPoints(*cgm.getVtableInfo().AddressPoints[c]), - VMContext(cgm.getModule().getContext()), - GenerateDefinition(GenerateDefinition) { - - // First comes the primary virtual table pointer for the complete class... - ClassVtbl = GenerateDefinition ? CGM.getVtableInfo().getVtable(Class) : 0; +void +VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base, + llvm::Constant *VTable, + const AddressPointsMapTy& AddressPoints) { + VisitedVirtualBasesSetTy VBases; + LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false, + VTable, Base.getBase(), AddressPoints, VBases); +} - llvm::Constant *Init = BuildVtablePtr(ClassVtbl, Class, Class, 0); - Inits.push_back(Init); +void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD, + VisitedVirtualBasesSetTy &VBases) { + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - // then the secondary VTTs... - SecondaryVTTs(Class); + // Check if this is a virtual base. + if (I->isVirtual()) { + // Check if we've seen this base before. + if (!VBases.insert(BaseDecl)) + continue; + + uint64_t BaseOffset = + MostDerivedClassLayout.getVBaseClassOffset(BaseDecl); + + LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true); + } + + // We only need to layout virtual VTTs for this base if it actually has + // virtual bases. + if (BaseDecl->getNumVBases()) + LayoutVirtualVTTs(BaseDecl, VBases); + } +} - // Make sure to clear the set of seen virtual bases. 
- SeenVBasesInSecondary.clear(); +void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) { + const CXXRecordDecl *RD = Base.getBase(); - // then the secondary vtable pointers... - Secondary(Class, ClassVtbl, Class); + // Itanium C++ ABI 2.6.2: + // An array of virtual table addresses, called the VTT, is declared for + // each class type that has indirect or direct virtual base classes. + if (RD->getNumVBases() == 0) + return; + + bool IsPrimaryVTT = Base.getBase() == MostDerivedClass; - // and last, the virtual VTTs. - VirtualVTTs(Class); + if (!IsPrimaryVTT) { + // Remember the sub-VTT index. + SubVTTIndicies[RD] = VTTComponents.size(); } + + AddressPointsMapTy AddressPoints; + llvm::Constant *VTable = GetAddrOfVTable(Base, BaseIsVirtual, AddressPoints); + + // Add the primary vtable pointer. + AddVTablePointer(Base, VTable, RD, AddressPoints); + + // Add the secondary VTTs. + LayoutSecondaryVTTs(Base); - llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getSubVTTIndicies() { - return SubVTTIndicies; + // Add the secondary virtual pointers. + LayoutSecondaryVirtualPointers(Base, VTable, AddressPoints); + + // If this is the primary VTT, we want to lay out virtual VTTs as well. + if (IsPrimaryVTT) { + VisitedVirtualBasesSetTy VBases; + LayoutVirtualVTTs(Base.getBase(), VBases); } -}; +} + } llvm::GlobalVariable * -CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage, - bool GenerateDefinition, - const CXXRecordDecl *RD) { +CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage, + bool GenerateDefinition, + const CXXRecordDecl *RD) { // Only classes that have virtual bases need a VTT. 
if (RD->getNumVBases() == 0) return 0; @@ -311,13 +383,15 @@ CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage, const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); - std::vector<llvm::Constant *> inits; - VTTBuilder b(inits, RD, CGM, GenerateDefinition); + VTTBuilder Builder(CGM, RD, GenerateDefinition); + + const llvm::ArrayType *Type = + llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size()); - const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size()); llvm::Constant *Init = 0; if (GenerateDefinition) - Init = llvm::ConstantArray::get(Type, inits); + Init = llvm::ConstantArray::get(Type, Builder.getVTTComponents().data(), + Builder.getVTTComponents().size()); llvm::GlobalVariable *OldGV = GV; GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true, @@ -336,26 +410,12 @@ CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage, return GV; } -CGVtableInfo::CtorVtableInfo -CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD, - const BaseSubobject &Base, bool BaseIsVirtual) { - CtorVtableInfo Info; - - Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage, - /*GenerateDefinition=*/true, - RD, Base.getBase(), Base.getBaseOffset(), - BaseIsVirtual, Info.AddressPoints); - return Info; -} - -llvm::GlobalVariable *CGVtableInfo::getVTT(const CXXRecordDecl *RD) { +llvm::GlobalVariable *CodeGenVTables::getVTT(const CXXRecordDecl *RD) { return GenerateVTT(llvm::GlobalValue::ExternalLinkage, /*GenerateDefinition=*/false, RD); - } - -bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) { +bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) { const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); // We don't have any virtual bases, just return early. 
@@ -373,19 +433,17 @@ bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) { return false; } -uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD, - const CXXRecordDecl *Base) { +uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD, + const CXXRecordDecl *Base) { ClassPairTy ClassPair(RD, Base); - SubVTTIndiciesTy::iterator I = - SubVTTIndicies.find(ClassPair); + SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassPair); if (I != SubVTTIndicies.end()) return I->second; - std::vector<llvm::Constant *> inits; - VTTBuilder Builder(inits, RD, CGM, /*GenerateDefinition=*/false); + VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false); - for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I = + for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::const_iterator I = Builder.getSubVTTIndicies().begin(), E = Builder.getSubVTTIndicies().end(); I != E; ++I) { // Insert all indices. @@ -399,3 +457,31 @@ uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD, return I->second; } + +uint64_t +CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, + BaseSubobject Base) { + SecondaryVirtualPointerIndicesMapTy::iterator I = + SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base)); + + if (I != SecondaryVirtualPointerIndices.end()) + return I->second; + + VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false); + + // Insert all secondary vpointer indices. 
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I = + Builder.getSecondaryVirtualPointerIndices().begin(), + E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) { + std::pair<const CXXRecordDecl *, BaseSubobject> Pair = + std::make_pair(RD, I->first); + + SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second)); + } + + I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base)); + assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!"); + + return I->second; +} + diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp index df30f47..2d1c734 100644 --- a/lib/CodeGen/CGVtable.cpp +++ b/lib/CodeGen/CGVtable.cpp @@ -138,7 +138,7 @@ private: /// AddOverriders - Add the final overriders for this base subobject to the /// map of final overriders. - void AddOverriders(BaseSubobject Base,uint64_t OffsetInLayoutClass, + void AddOverriders(BaseSubobject Base, uint64_t OffsetInLayoutClass, SubobjectOffsetsMapTy &Offsets); /// PropagateOverrider - Propagate the NewMD overrider to all the functions @@ -636,6 +636,10 @@ public: reinterpret_cast<uintptr_t>(MD)); } + static VtableComponent getFromOpaqueInteger(uint64_t I) { + return VtableComponent(I); + } + /// getKind - Get the kind of this vtable component. Kind getKind() const { return (Kind)(Value & 0x7); @@ -725,6 +729,9 @@ private: return static_cast<uintptr_t>(Value & ~7ULL); } + explicit VtableComponent(uint64_t Value) + : Value(Value) { } + /// The kind is stored in the lower 3 bits of the value. For offsets, we /// make use of the facts that classes can't be larger than 2^55 bytes, /// so we store the offset in the lower part of the 61 bytes that remain. 
@@ -1091,9 +1098,15 @@ public: typedef llvm::SmallSetVector<const CXXRecordDecl *, 8> PrimaryBasesSetVectorTy; + typedef llvm::DenseMap<const CXXRecordDecl *, int64_t> + VBaseOffsetOffsetsMapTy; + + typedef llvm::DenseMap<BaseSubobject, uint64_t> + AddressPointsMapTy; + private: - /// VtableInfo - Global vtable information. - CGVtableInfo &VtableInfo; + /// VTables - Global vtable information. + CodeGenVTables &VTables; /// MostDerivedClass - The most derived class for which we're building this /// vtable. @@ -1122,9 +1135,6 @@ private: /// bases in this vtable. llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases; - typedef llvm::DenseMap<const CXXRecordDecl *, int64_t> - VBaseOffsetOffsetsMapTy; - /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for /// the most derived class. VBaseOffsetOffsetsMapTy VBaseOffsetOffsets; @@ -1133,29 +1143,8 @@ private: llvm::SmallVector<VtableComponent, 64> Components; /// AddressPoints - Address points for the vtable being built. - CGVtableInfo::AddressPointsMapTy AddressPoints; - - /// ReturnAdjustment - A return adjustment. - struct ReturnAdjustment { - /// NonVirtual - The non-virtual adjustment from the derived object to its - /// nearest virtual base. - int64_t NonVirtual; - - /// VBaseOffsetOffset - The offset (in bytes), relative to the address point - /// of the virtual base class offset. - int64_t VBaseOffsetOffset; - - ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { } - - bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; } + AddressPointsMapTy AddressPoints; - friend bool operator==(const ReturnAdjustment &LHS, - const ReturnAdjustment &RHS) { - return LHS.NonVirtual == RHS.NonVirtual && - LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset; - } - }; - /// MethodInfo - Contains information about a method in a vtable. /// (Used for computing 'this' pointer adjustment thunks. struct MethodInfo { @@ -1185,62 +1174,21 @@ private: /// currently building. 
MethodInfoMapTy MethodInfoMap; - /// ThisAdjustment - A 'this' pointer adjustment thunk. - struct ThisAdjustment { - /// NonVirtual - The non-virtual adjustment from the derived object to its - /// nearest virtual base. - int64_t NonVirtual; - - /// VCallOffsetOffset - The offset (in bytes), relative to the address point, - /// of the virtual call offset. - int64_t VCallOffsetOffset; - - ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { } - - bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; } - - friend bool operator==(const ThisAdjustment &LHS, - const ThisAdjustment &RHS) { - return LHS.NonVirtual == RHS.NonVirtual && - LHS.VCallOffsetOffset == RHS.VCallOffsetOffset; - } - }; - - /// ThunkInfo - The 'this' pointer adjustment as well as an optional return - /// adjustment for a thunk. - struct ThunkInfo { - /// This - The 'this' pointer adjustment. - ThisAdjustment This; - - /// Return - The return adjustment. - ReturnAdjustment Return; - - ThunkInfo() { } - - ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return) - : This(This), Return(Return) { } + typedef llvm::DenseMap<uint64_t, ThunkInfo> VtableThunksMapTy; - friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) { - return LHS.This == RHS.This && LHS.Return == RHS.Return; - } + /// VTableThunks - The thunks by vtable index in the vtable currently being + /// built. + VtableThunksMapTy VTableThunks; - bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); } - }; - - typedef llvm::DenseMap<uint64_t, ThunkInfo> ThunksInfoMapTy; + typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy; + typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy; - /// Thunks - The thunks by vtable index in the vtable currently being built. 
- ThunksInfoMapTy Thunks; - - typedef llvm::DenseMap<const CXXMethodDecl *, - llvm::SmallVector<ThunkInfo, 1> > MethodThunksMapTy; - - /// MethodThunks - A map that contains all the thunks needed for all methods - /// in the vtable currently being built. - MethodThunksMapTy MethodThunks; + /// Thunks - A map that contains all the thunks needed for all methods in the + /// most derived class for which the vtable is currently being built. + ThunksMapTy Thunks; /// AddThunk - Add a thunk for the given method. - void AddThunk(const CXXMethodDecl *MD, ThunkInfo &Thunk); + void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk); /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the /// part of the vtable we're currently building. @@ -1341,10 +1289,10 @@ private: } public: - VtableBuilder(CGVtableInfo &VtableInfo, const CXXRecordDecl *MostDerivedClass, + VtableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass, uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual, const CXXRecordDecl *LayoutClass) - : VtableInfo(VtableInfo), MostDerivedClass(MostDerivedClass), + : VTables(VTables), MostDerivedClass(MostDerivedClass), MostDerivedClassOffset(MostDerivedClassOffset), MostDerivedClassIsVirtual(MostDerivedClassIsVirtual), LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()), @@ -1353,15 +1301,57 @@ public: LayoutVtable(); } + ThunksMapTy::const_iterator thunks_begin() const { + return Thunks.begin(); + } + + ThunksMapTy::const_iterator thunks_end() const { + return Thunks.end(); + } + + const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const { + return VBaseOffsetOffsets; + } + + /// getNumVTableComponents - Return the number of components in the vtable + /// currently built. 
+ uint64_t getNumVTableComponents() const { + return Components.size(); + } + + const uint64_t *vtable_components_data_begin() const { + return reinterpret_cast<const uint64_t *>(Components.begin()); + } + + const uint64_t *vtable_components_data_end() const { + return reinterpret_cast<const uint64_t *>(Components.end()); + } + + AddressPointsMapTy::const_iterator address_points_begin() const { + return AddressPoints.begin(); + } + + AddressPointsMapTy::const_iterator address_points_end() const { + return AddressPoints.end(); + } + + VtableThunksMapTy::const_iterator vtable_thunks_begin() const { + return VTableThunks.begin(); + } + + VtableThunksMapTy::const_iterator vtable_thunks_end() const { + return VTableThunks.end(); + } + /// dumpLayout - Dump the vtable layout. void dumpLayout(llvm::raw_ostream&); }; -void VtableBuilder::AddThunk(const CXXMethodDecl *MD, ThunkInfo &Thunk) { - if (isBuildingConstructorVtable()) - return; +void VtableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) { + assert(!isBuildingConstructorVtable() && + "Can't add thunks for construction vtable"); - llvm::SmallVector<ThunkInfo, 1> &ThunksVector = MethodThunks[MD]; + llvm::SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD]; // Check if we have this thunk already. if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) != @@ -1410,6 +1400,17 @@ void VtableBuilder::ComputeThisAdjustments() { Overriders.getOverrider(BaseSubobject(MD->getParent(), MethodInfo.BaseOffset), MD); + // Check if we need an adjustment at all. + if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) { + // When a return thunk is needed by a derived class that overrides a + // virtual base, gcc uses a virtual 'this' adjustment as well. + // While the thunk itself might be needed by vtables in subclasses or + // in construction vtables, there doesn't seem to be a reason for using + // the thunk in this vtable. Still, we do so to match gcc. 
+ if (VTableThunks.lookup(VtableIndex).Return.isEmpty()) + continue; + } + ThisAdjustment ThisAdjustment = ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider); @@ -1417,22 +1418,48 @@ void VtableBuilder::ComputeThisAdjustments() { continue; // Add it. - Thunks[VtableIndex].This = ThisAdjustment; + VTableThunks[VtableIndex].This = ThisAdjustment; if (isa<CXXDestructorDecl>(MD)) { // Add an adjustment for the deleting destructor as well. - Thunks[VtableIndex + 1].This = ThisAdjustment; + VTableThunks[VtableIndex + 1].This = ThisAdjustment; } - - AddThunk(Overrider.Method, Thunks[VtableIndex]); } /// Clear the method info map. MethodInfoMap.clear(); + + if (isBuildingConstructorVtable()) { + // We don't need to store thunk information for construction vtables. + return; + } + + for (VtableThunksMapTy::const_iterator I = VTableThunks.begin(), + E = VTableThunks.end(); I != E; ++I) { + const VtableComponent &Component = Components[I->first]; + const ThunkInfo &Thunk = I->second; + const CXXMethodDecl *MD; + + switch (Component.getKind()) { + default: + llvm_unreachable("Unexpected vtable component kind!"); + case VtableComponent::CK_FunctionPointer: + MD = Component.getFunctionDecl(); + break; + case VtableComponent::CK_CompleteDtorPointer: + MD = Component.getDestructorDecl(); + break; + case VtableComponent::CK_DeletingDtorPointer: + // We've already added the thunk when we saw the complete dtor pointer. 
+ continue; + } + + if (MD->getParent() == MostDerivedClass) + AddThunk(MD, Thunk); + } } -VtableBuilder::ReturnAdjustment -VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) { +ReturnAdjustment VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) { ReturnAdjustment Adjustment; if (!Offset.isEmpty()) { @@ -1444,8 +1471,8 @@ VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) { VBaseOffsetOffsets.lookup(Offset.VirtualBase); } else { Adjustment.VBaseOffsetOffset = - VtableInfo.getVirtualBaseOffsetOffset(Offset.DerivedClass, - Offset.VirtualBase); + VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass, + Offset.VirtualBase); } // FIXME: Once the assert in getVirtualBaseOffsetOffset is back again, @@ -1512,14 +1539,10 @@ VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base, return BaseOffset(); } -VtableBuilder::ThisAdjustment +ThisAdjustment VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD, uint64_t BaseOffsetInLayoutClass, FinalOverriders::OverriderInfo Overrider) { - // Check if we need an adjustment at all. - if (BaseOffsetInLayoutClass == Overrider.Offset) - return ThisAdjustment(); - // Ignore adjustments for pure virtual member functions. if (Overrider.Method->isPure()) return ThisAdjustment(); @@ -1576,7 +1599,7 @@ VtableBuilder::AddMethod(const CXXMethodDecl *MD, } else { // Add the return adjustment if necessary. if (!ReturnAdjustment.isEmpty()) - Thunks[Components.size()].Return = ReturnAdjustment; + VTableThunks[Components.size()].Return = ReturnAdjustment; // Add the function. 
Components.push_back(VtableComponent::MakeFunction(MD)); @@ -1776,6 +1799,25 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass, MethodInfoMap.insert(std::make_pair(MD, MethodInfo)); MethodInfoMap.erase(OverriddenMD); + + // If the overridden method exists in a virtual base class or a direct + // or indirect base class of a virtual base class, we need to emit a + // thunk if we ever have a class hierarchy where the base class is not + // a primary base in the complete object. + if (!isBuildingConstructorVtable() && OverriddenMD != MD) { + // Compute the this adjustment. + ThisAdjustment ThisAdjustment = + ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass, + Overrider); + + if (ThisAdjustment.VCallOffsetOffset && + Overrider.Method->getParent() == MostDerivedClass) { + // This is a virtual thunk for the most derived class, add it. + AddThunk(Overrider.Method, + ThunkInfo(ThisAdjustment, ReturnAdjustment())); + } + } + continue; } } @@ -1866,20 +1908,32 @@ VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base, // Compute 'this' pointer adjustments. ComputeThisAdjustments(); - // Record the address point. - AddressPoints.insert(std::make_pair(BaseSubobject(Base.getBase(), - OffsetInLayoutClass), - AddressPoint)); - - // Record the address points for all primary bases. - for (PrimaryBasesSetVectorTy::const_iterator I = PrimaryBases.begin(), - E = PrimaryBases.end(); I != E; ++I) { - const CXXRecordDecl *BaseDecl = *I; + // Add all address points. + const CXXRecordDecl *RD = Base.getBase(); + while (true) { + AddressPoints.insert(std::make_pair(BaseSubobject(RD, OffsetInLayoutClass), + AddressPoint)); + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - // We know that all the primary bases have the same offset as the base - // subobject. 
- BaseSubobject PrimaryBase(BaseDecl, OffsetInLayoutClass); - AddressPoints.insert(std::make_pair(PrimaryBase, AddressPoint)); + if (!PrimaryBase) + break; + + if (Layout.getPrimaryBaseWasVirtual()) { + // Check if this virtual primary base is a primary base in the layout + // class. If it's not, we don't want to add it. + const ASTRecordLayout &LayoutClassLayout = + Context.getASTRecordLayout(LayoutClass); + + if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) != + OffsetInLayoutClass) { + // We don't want to add this class (or any of its primary bases). + break; + } + } + + RD = PrimaryBase; } bool BaseIsMorallyVirtual = BaseIsVirtual; @@ -2062,8 +2116,8 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) { // Since an address point can be shared by multiple subobjects, we use an // STL multimap. std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex; - for (CGVtableInfo::AddressPointsMapTy::const_iterator I = - AddressPoints.begin(), E = AddressPoints.end(); I != E; ++I) { + for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(), + E = AddressPoints.end(); I != E; ++I) { const BaseSubobject& Base = I->first; uint64_t Index = I->second; @@ -2106,7 +2160,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) { if (MD->isPure()) Out << " [pure]"; - ThunkInfo Thunk = Thunks.lookup(I); + ThunkInfo Thunk = VTableThunks.lookup(I); if (!Thunk.isEmpty()) { // If this function pointer has a return adjustment, dump it. if (!Thunk.Return.isEmpty()) { @@ -2154,7 +2208,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) { if (DD->isPure()) Out << " [pure]"; - ThunkInfo Thunk = Thunks.lookup(I); + ThunkInfo Thunk = VTableThunks.lookup(I); if (!Thunk.isEmpty()) { // If this destructor has a 'this' pointer adjustment, dump it. 
if (!Thunk.This.isEmpty()) { @@ -2253,13 +2307,12 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) { Out << "\n"; } - if (!MethodThunks.empty()) { - + if (!Thunks.empty()) { // We store the method names in a map to get a stable order. std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls; - for (MethodThunksMapTy::const_iterator I = MethodThunks.begin(), - E = MethodThunks.end(); I != E; ++I) { + for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end(); + I != E; ++I) { const CXXMethodDecl *MD = I->first; std::string MethodName = PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, @@ -2273,7 +2326,9 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) { I != E; ++I) { const std::string &MethodName = I->first; const CXXMethodDecl *MD = I->second; - const llvm::SmallVector<ThunkInfo, 1> &ThunksVector = MethodThunks[MD]; + + ThunkInfoVectorTy ThunksVector = Thunks[MD]; + std::sort(ThunksVector.begin(), ThunksVector.end()); Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size(); Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n"; @@ -2283,1099 +2338,42 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) { Out << llvm::format("%4d | ", I); + // If this function pointer has a return pointer adjustment, dump it. + if (!Thunk.Return.isEmpty()) { + Out << "return adjustment: " << Thunk.Return.NonVirtual; + Out << " non-virtual"; + if (Thunk.Return.VBaseOffsetOffset) { + Out << ", " << Thunk.Return.VBaseOffsetOffset; + Out << " vbase offset offset"; + } + + if (!Thunk.This.isEmpty()) + Out << "\n "; + } + // If this function pointer has a 'this' pointer adjustment, dump it. 
if (!Thunk.This.isEmpty()) { - Out << "this: "; - Out << Thunk.This.NonVirtual << " nv"; + Out << "this adjustment: "; + Out << Thunk.This.NonVirtual << " non-virtual"; if (Thunk.This.VCallOffsetOffset) { Out << ", " << Thunk.This.VCallOffsetOffset; - Out << " v"; + Out << " vcall offset offset"; } } Out << '\n'; } - } - } -} - -} - -namespace { -class OldVtableBuilder { -public: - /// Index_t - Vtable index type. - typedef uint64_t Index_t; - typedef std::vector<std::pair<GlobalDecl, - std::pair<GlobalDecl, ThunkAdjustment> > > - SavedAdjustmentsVectorTy; -private: - - // VtableComponents - The components of the vtable being built. - typedef llvm::SmallVector<llvm::Constant *, 64> VtableComponentsVectorTy; - VtableComponentsVectorTy VtableComponents; - - const bool BuildVtable; - - llvm::Type *Ptr8Ty; - - /// MostDerivedClass - The most derived class that this vtable is being - /// built for. - const CXXRecordDecl *MostDerivedClass; - - /// LayoutClass - The most derived class used for virtual base layout - /// information. - const CXXRecordDecl *LayoutClass; - /// LayoutOffset - The offset for Class in LayoutClass. - uint64_t LayoutOffset; - /// BLayout - Layout for the most derived class that this vtable is being - /// built for. - const ASTRecordLayout &BLayout; - llvm::SmallSet<const CXXRecordDecl *, 32> IndirectPrimary; - llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase; - llvm::Constant *rtti; - llvm::LLVMContext &VMContext; - CodeGenModule &CGM; // Per-module state. - - llvm::DenseMap<const CXXMethodDecl *, Index_t> VCall; - llvm::DenseMap<GlobalDecl, Index_t> VCallOffset; - llvm::DenseMap<GlobalDecl, Index_t> VCallOffsetForVCall; - // This is the offset to the nearest virtual base - llvm::DenseMap<const CXXMethodDecl *, Index_t> NonVirtualOffset; - llvm::DenseMap<const CXXRecordDecl *, Index_t> VBIndex; - - /// PureVirtualFunction - Points to __cxa_pure_virtual. 
- llvm::Constant *PureVirtualFn; - - /// VtableMethods - A data structure for keeping track of methods in a vtable. - /// Can add methods, override methods and iterate in vtable order. - class VtableMethods { - // MethodToIndexMap - Maps from a global decl to the index it has in the - // Methods vector. - llvm::DenseMap<GlobalDecl, uint64_t> MethodToIndexMap; - - /// Methods - The methods, in vtable order. - typedef llvm::SmallVector<GlobalDecl, 16> MethodsVectorTy; - MethodsVectorTy Methods; - MethodsVectorTy OrigMethods; - - public: - /// AddMethod - Add a method to the vtable methods. - void AddMethod(GlobalDecl GD) { - assert(!MethodToIndexMap.count(GD) && - "Method has already been added!"); - - MethodToIndexMap[GD] = Methods.size(); - Methods.push_back(GD); - OrigMethods.push_back(GD); - } - - /// OverrideMethod - Replace a method with another. - void OverrideMethod(GlobalDecl OverriddenGD, GlobalDecl GD) { - llvm::DenseMap<GlobalDecl, uint64_t>::iterator i - = MethodToIndexMap.find(OverriddenGD); - assert(i != MethodToIndexMap.end() && "Did not find entry!"); - - // Get the index of the old decl. - uint64_t Index = i->second; - - // Replace the old decl with the new decl. - Methods[Index] = GD; - - // And add the new. - MethodToIndexMap[GD] = Index; - } - - /// getIndex - Gives the index of a passed in GlobalDecl. Returns false if - /// the index couldn't be found. 
- bool getIndex(GlobalDecl GD, uint64_t &Index) const { - llvm::DenseMap<GlobalDecl, uint64_t>::const_iterator i - = MethodToIndexMap.find(GD); - - if (i == MethodToIndexMap.end()) - return false; - - Index = i->second; - return true; - } - - GlobalDecl getOrigMethod(uint64_t Index) const { - return OrigMethods[Index]; - } - - MethodsVectorTy::size_type size() const { - return Methods.size(); - } - - void clear() { - MethodToIndexMap.clear(); - Methods.clear(); - OrigMethods.clear(); - } - - GlobalDecl operator[](uint64_t Index) const { - return Methods[Index]; - } - }; - - /// Methods - The vtable methods we're currently building. - VtableMethods Methods; - - /// ThisAdjustments - For a given index in the vtable, contains the 'this' - /// pointer adjustment needed for a method. - typedef llvm::DenseMap<uint64_t, ThunkAdjustment> ThisAdjustmentsMapTy; - ThisAdjustmentsMapTy ThisAdjustments; - - SavedAdjustmentsVectorTy SavedAdjustments; - - /// BaseReturnTypes - Contains the base return types of methods who have been - /// overridden with methods whose return types require adjustment. Used for - /// generating covariant thunk information. - typedef llvm::DenseMap<uint64_t, CanQualType> BaseReturnTypesMapTy; - BaseReturnTypesMapTy BaseReturnTypes; - - std::vector<Index_t> VCalls; - - typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t; - // subAddressPoints - Used to hold the AddressPoints (offsets) into the built - // vtable for use in computing the initializers for the VTT. - llvm::DenseMap<CtorVtable_t, int64_t> &subAddressPoints; - - /// AddressPoints - Address points for this vtable. 
- CGVtableInfo::AddressPointsMapTy& AddressPoints; - - typedef CXXRecordDecl::method_iterator method_iter; - const uint32_t LLVMPointerWidth; - Index_t extra; - typedef std::vector<std::pair<const CXXRecordDecl *, int64_t> > Path_t; - static llvm::DenseMap<CtorVtable_t, int64_t>& - AllocAddressPoint(CodeGenModule &cgm, const CXXRecordDecl *l, - const CXXRecordDecl *c) { - CGVtableInfo::AddrMap_t *&oref = cgm.getVtableInfo().AddressPoints[l]; - if (oref == 0) - oref = new CGVtableInfo::AddrMap_t; - - llvm::DenseMap<CtorVtable_t, int64_t> *&ref = (*oref)[c]; - if (ref == 0) - ref = new llvm::DenseMap<CtorVtable_t, int64_t>; - return *ref; - } - - bool DclIsSame(const FunctionDecl *New, const FunctionDecl *Old) { - FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate(); - FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate(); - - // C++ [temp.fct]p2: - // A function template can be overloaded with other function templates - // and with normal (non-template) functions. - if ((OldTemplate == 0) != (NewTemplate == 0)) - return false; - - // Is the function New an overload of the function Old? - QualType OldQType = CGM.getContext().getCanonicalType(Old->getType()); - QualType NewQType = CGM.getContext().getCanonicalType(New->getType()); - - // Compare the signatures (C++ 1.3.10) of the two functions to - // determine whether they are overloads. If we find any mismatch - // in the signature, they are overloads. - - // If either of these functions is a K&R-style function (no - // prototype), then we consider them to have matching signatures. 
- if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) || - isa<FunctionNoProtoType>(NewQType.getTypePtr())) - return true; - - FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType); - FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType); - - // The signature of a function includes the types of its - // parameters (C++ 1.3.10), which includes the presence or absence - // of the ellipsis; see C++ DR 357). - if (OldQType != NewQType && - (OldType->getNumArgs() != NewType->getNumArgs() || - OldType->isVariadic() != NewType->isVariadic() || - !std::equal(OldType->arg_type_begin(), OldType->arg_type_end(), - NewType->arg_type_begin()))) - return false; - -#if 0 - // C++ [temp.over.link]p4: - // The signature of a function template consists of its function - // signature, its return type and its template parameter list. The names - // of the template parameters are significant only for establishing the - // relationship between the template parameters and the rest of the - // signature. - // - // We check the return type and template parameter lists for function - // templates first; the remaining checks follow. - if (NewTemplate && - (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(), - OldTemplate->getTemplateParameters(), - TPL_TemplateMatch) || - OldType->getResultType() != NewType->getResultType())) - return false; -#endif - - // If the function is a class member, its signature includes the - // cv-qualifiers (if any) on the function itself. - // - // As part of this, also check whether one of the member functions - // is static, in which case they are not overloads (C++ - // 13.1p2). While not part of the definition of the signature, - // this check is important to determine whether these functions - // can be overloaded. 
- const CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old); - const CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New); - if (OldMethod && NewMethod && - !OldMethod->isStatic() && !NewMethod->isStatic() && - OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers()) - return false; - - // The signatures match; this is not an overload. - return true; - } - - typedef llvm::DenseMap<const CXXMethodDecl *, const CXXMethodDecl*> - ForwardUnique_t; - ForwardUnique_t ForwardUnique; - llvm::DenseMap<const CXXMethodDecl*, const CXXMethodDecl*> UniqueOverrider; - - void BuildUniqueOverrider(const CXXMethodDecl *U, const CXXMethodDecl *MD) { - const CXXMethodDecl *PrevU = UniqueOverrider[MD]; - assert(U && "no unique overrider"); - if (PrevU == U) - return; - if (PrevU != U && PrevU != 0) { - // If already set, note the two sets as the same - if (0) - printf("%s::%s same as %s::%s\n", - PrevU->getParent()->getNameAsString().c_str(), - PrevU->getNameAsString().c_str(), - U->getParent()->getNameAsString().c_str(), - U->getNameAsString().c_str()); - ForwardUnique[PrevU] = U; - return; - } - - // Not set, set it now - if (0) - printf("marking %s::%s %p override as %s::%s\n", - MD->getParent()->getNameAsString().c_str(), - MD->getNameAsString().c_str(), - (void*)MD, - U->getParent()->getNameAsString().c_str(), - U->getNameAsString().c_str()); - UniqueOverrider[MD] = U; - - for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(), - me = MD->end_overridden_methods(); mi != me; ++mi) { - BuildUniqueOverrider(U, *mi); - } - } - - void BuildUniqueOverriders(const CXXRecordDecl *RD) { - if (0) printf("walking %s\n", RD->getNameAsCString()); - for (CXXRecordDecl::method_iterator i = RD->method_begin(), - e = RD->method_end(); i != e; ++i) { - const CXXMethodDecl *MD = *i; - if (!MD->isVirtual()) - continue; - - if (UniqueOverrider[MD] == 0) { - // Only set this, if it hasn't been set yet. 
- BuildUniqueOverrider(MD, MD); - if (0) - printf("top set is %s::%s %p\n", - MD->getParent()->getNameAsString().c_str(), - MD->getNameAsString().c_str(), - (void*)MD); - ForwardUnique[MD] = MD; - } - } - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - BuildUniqueOverriders(Base); - } - } - - static int DclCmp(const void *p1, const void *p2) { - const CXXMethodDecl *MD1 = *(const CXXMethodDecl *const *)p1; - const CXXMethodDecl *MD2 = *(const CXXMethodDecl *const *)p2; - - return (DeclarationName::compare(MD1->getDeclName(), MD2->getDeclName())); - } - - void MergeForwarding() { - typedef llvm::SmallVector<const CXXMethodDecl *, 100> A_t; - A_t A; - for (ForwardUnique_t::iterator I = ForwardUnique.begin(), - E = ForwardUnique.end(); I != E; ++I) { - if (I->first == I->second) - // Only add the roots of all trees - A.push_back(I->first); - } - llvm::array_pod_sort(A.begin(), A.end(), DclCmp); - for (A_t::iterator I = A.begin(), - E = A.end(); I != E; ++I) { - A_t::iterator J = I; - while (++J != E && DclCmp(I, J) == 0) - if (DclIsSame(*I, *J)) { - if (0) printf("connecting %s\n", (*I)->getNameAsString().c_str()); - ForwardUnique[*J] = *I; - } - } - } - - const CXXMethodDecl *getUnique(const CXXMethodDecl *MD) { - const CXXMethodDecl *U = UniqueOverrider[MD]; - assert(U && "unique overrider not found"); - while (ForwardUnique.count(U)) { - const CXXMethodDecl *NU = ForwardUnique[U]; - if (NU == U) break; - U = NU; - } - return U; - } - - GlobalDecl getUnique(GlobalDecl GD) { - const CXXMethodDecl *Unique = getUnique(cast<CXXMethodDecl>(GD.getDecl())); - - if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Unique)) - return GlobalDecl(CD, GD.getCtorType()); - - if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Unique)) - return GlobalDecl(DD, GD.getDtorType()); - - return Unique; - } - 
- /// getPureVirtualFn - Return the __cxa_pure_virtual function. - llvm::Constant* getPureVirtualFn() { - if (!PureVirtualFn) { - const llvm::FunctionType *Ty = - llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), - /*isVarArg=*/false); - PureVirtualFn = wrap(CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual")); - } - - return PureVirtualFn; - } - -public: - OldVtableBuilder(const CXXRecordDecl *MostDerivedClass, - const CXXRecordDecl *l, uint64_t lo, CodeGenModule &cgm, - bool build, CGVtableInfo::AddressPointsMapTy& AddressPoints) - : BuildVtable(build), MostDerivedClass(MostDerivedClass), LayoutClass(l), - LayoutOffset(lo), BLayout(cgm.getContext().getASTRecordLayout(l)), - rtti(0), VMContext(cgm.getModule().getContext()),CGM(cgm), - PureVirtualFn(0), - subAddressPoints(AllocAddressPoint(cgm, l, MostDerivedClass)), - AddressPoints(AddressPoints), - LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0)) - { - Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0); - if (BuildVtable) { - QualType ClassType = CGM.getContext().getTagDeclType(MostDerivedClass); - rtti = CGM.GetAddrOfRTTIDescriptor(ClassType); - } - BuildUniqueOverriders(MostDerivedClass); - MergeForwarding(); - } - - // getVtableComponents - Returns a reference to the vtable components. 
- const VtableComponentsVectorTy &getVtableComponents() const { - return VtableComponents; - } - - llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getVBIndex() - { return VBIndex; } - - SavedAdjustmentsVectorTy &getSavedAdjustments() - { return SavedAdjustments; } - - llvm::Constant *wrap(Index_t i) { - llvm::Constant *m; - m = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), i); - return llvm::ConstantExpr::getIntToPtr(m, Ptr8Ty); - } - - llvm::Constant *wrap(llvm::Constant *m) { - return llvm::ConstantExpr::getBitCast(m, Ptr8Ty); - } - -//#define D1(x) -#define D1(X) do { if (getenv("CLANG_VTABLE_DEBUG")) { X; } } while (0) - - void GenerateVBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset, - bool updateVBIndex, Index_t current_vbindex) { - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - Index_t next_vbindex = current_vbindex; - if (i->isVirtual() && !SeenVBase.count(Base)) { - SeenVBase.insert(Base); - if (updateVBIndex) { - next_vbindex = (ssize_t)(-(VCalls.size()*LLVMPointerWidth/8) - - 3*LLVMPointerWidth/8); - VBIndex[Base] = next_vbindex; - } - int64_t BaseOffset = -(Offset/8) + BLayout.getVBaseClassOffset(Base)/8; - VCalls.push_back((0?700:0) + BaseOffset); - D1(printf(" vbase for %s at %d delta %d most derived %s\n", - Base->getNameAsCString(), - (int)-VCalls.size()-3, (int)BaseOffset, - MostDerivedClass->getNameAsCString())); - } - // We also record offsets for non-virtual bases to closest enclosing - // virtual base. We do this so that we don't have to search - // for the nearst virtual base class when generating thunks. 
- if (updateVBIndex && VBIndex.count(Base) == 0) - VBIndex[Base] = next_vbindex; - GenerateVBaseOffsets(Base, Offset, updateVBIndex, next_vbindex); - } - } - - void StartNewTable() { - SeenVBase.clear(); - } - - Index_t getNVOffset_1(const CXXRecordDecl *D, const CXXRecordDecl *B, - Index_t Offset = 0) { - - if (B == D) - return Offset; - - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D); - for (CXXRecordDecl::base_class_const_iterator i = D->bases_begin(), - e = D->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - int64_t BaseOffset = 0; - if (!i->isVirtual()) - BaseOffset = Offset + Layout.getBaseClassOffset(Base); - int64_t o = getNVOffset_1(Base, B, BaseOffset); - if (o >= 0) - return o; - } - - return -1; - } - - /// getNVOffset - Returns the non-virtual offset for the given (B) base of the - /// derived class D. - Index_t getNVOffset(QualType qB, QualType qD) { - qD = qD->getPointeeType(); - qB = qB->getPointeeType(); - CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl()); - CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl()); - int64_t o = getNVOffset_1(D, B); - if (o >= 0) - return o; - - assert(false && "FIXME: non-virtual base not found"); - return 0; - } - - /// getVbaseOffset - Returns the index into the vtable for the virtual base - /// offset for the given (B) virtual base of the derived class D. 
- Index_t getVbaseOffset(QualType qB, QualType qD) { - qD = qD->getPointeeType(); - qB = qB->getPointeeType(); - CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl()); - CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl()); - if (D != MostDerivedClass) - return CGM.getVtableInfo().getVirtualBaseOffsetOffset(D, B); - llvm::DenseMap<const CXXRecordDecl *, Index_t>::iterator i; - i = VBIndex.find(B); - if (i != VBIndex.end()) - return i->second; - - assert(false && "FIXME: Base not found"); - return 0; - } - - bool OverrideMethod(GlobalDecl GD, bool MorallyVirtual, - Index_t OverrideOffset, Index_t Offset, - int64_t CurrentVBaseOffset); - - /// AppendMethods - Append the current methods to the vtable. - void AppendMethodsToVtable(); - - llvm::Constant *WrapAddrOf(GlobalDecl GD) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - - const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD); - - return wrap(CGM.GetAddrOfFunction(GD, Ty)); - } - - void OverrideMethods(Path_t *Path, bool MorallyVirtual, int64_t Offset, - int64_t CurrentVBaseOffset) { - for (Path_t::reverse_iterator i = Path->rbegin(), - e = Path->rend(); i != e; ++i) { - const CXXRecordDecl *RD = i->first; - int64_t OverrideOffset = i->second; - for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me; - ++mi) { - const CXXMethodDecl *MD = *mi; - - if (!MD->isVirtual()) - continue; - - if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { - // Override both the complete and the deleting destructor. 
- GlobalDecl CompDtor(DD, Dtor_Complete); - OverrideMethod(CompDtor, MorallyVirtual, OverrideOffset, Offset, - CurrentVBaseOffset); - - GlobalDecl DeletingDtor(DD, Dtor_Deleting); - OverrideMethod(DeletingDtor, MorallyVirtual, OverrideOffset, Offset, - CurrentVBaseOffset); - } else { - OverrideMethod(MD, MorallyVirtual, OverrideOffset, Offset, - CurrentVBaseOffset); - } - } - } - } - - void AddMethod(const GlobalDecl GD, bool MorallyVirtual, Index_t Offset, - int64_t CurrentVBaseOffset) { - // If we can find a previously allocated slot for this, reuse it. - if (OverrideMethod(GD, MorallyVirtual, Offset, Offset, - CurrentVBaseOffset)) - return; - - D1(printf(" vfn for %s at %d\n", - dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsString().c_str(), - (int)Methods.size())); - - // We didn't find an entry in the vtable that we could use, add a new - // entry. - Methods.AddMethod(GD); - - VCallOffset[GD] = Offset/8 - CurrentVBaseOffset/8; - - if (MorallyVirtual) { - GlobalDecl UGD = getUnique(GD); - const CXXMethodDecl *UMD = cast<CXXMethodDecl>(UGD.getDecl()); - - assert(UMD && "final overrider not found"); - - Index_t &idx = VCall[UMD]; - // Allocate the first one, after that, we reuse the previous one. 
- if (idx == 0) { - VCallOffsetForVCall[UGD] = Offset/8; - NonVirtualOffset[UMD] = Offset/8 - CurrentVBaseOffset/8; - idx = VCalls.size()+1; - VCalls.push_back(Offset/8 - CurrentVBaseOffset/8); - D1(printf(" vcall for %s at %d with delta %d\n", - dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsString().c_str(), - (int)-VCalls.size()-3, (int)VCalls[idx-1])); - } - } - } - - void AddMethods(const CXXRecordDecl *RD, bool MorallyVirtual, - Index_t Offset, int64_t CurrentVBaseOffset) { - for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me; - ++mi) { - const CXXMethodDecl *MD = *mi; - if (!MD->isVirtual()) - continue; - if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { - // For destructors, add both the complete and the deleting destructor - // to the vtable. - AddMethod(GlobalDecl(DD, Dtor_Complete), MorallyVirtual, Offset, - CurrentVBaseOffset); - AddMethod(GlobalDecl(DD, Dtor_Deleting), MorallyVirtual, Offset, - CurrentVBaseOffset); - } else - AddMethod(MD, MorallyVirtual, Offset, CurrentVBaseOffset); - } - } + Out << '\n'; - void NonVirtualBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout, - const CXXRecordDecl *PrimaryBase, - bool PrimaryBaseWasVirtual, bool MorallyVirtual, - int64_t Offset, int64_t CurrentVBaseOffset, - Path_t *Path) { - Path->push_back(std::make_pair(RD, Offset)); - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - if (i->isVirtual()) - continue; - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - uint64_t o = Offset + Layout.getBaseClassOffset(Base); - StartNewTable(); - GenerateVtableForBase(Base, o, MorallyVirtual, false, - true, Base == PrimaryBase && !PrimaryBaseWasVirtual, - CurrentVBaseOffset, Path); } - Path->pop_back(); } - -// #define D(X) do { X; } while (0) -#define D(X) - - void insertVCalls(int InsertionPoint) { - D1(printf("============= combining vbase/vcall\n")); - 
D(VCalls.insert(VCalls.begin(), 673)); - D(VCalls.push_back(672)); - - VtableComponents.insert(VtableComponents.begin() + InsertionPoint, - VCalls.size(), 0); - if (BuildVtable) { - // The vcalls come first... - for (std::vector<Index_t>::reverse_iterator i = VCalls.rbegin(), - e = VCalls.rend(); - i != e; ++i) - VtableComponents[InsertionPoint++] = wrap((0?600:0) + *i); - } - VCalls.clear(); - VCall.clear(); - VCallOffsetForVCall.clear(); - VCallOffset.clear(); - NonVirtualOffset.clear(); - } - - void AddAddressPoints(const CXXRecordDecl *RD, uint64_t Offset, - Index_t AddressPoint) { - D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString(), - LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint)); - subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint; - AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint; - - // Now also add the address point for all our primary bases. - while (1) { - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - RD = Layout.getPrimaryBase(); - const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual(); - // FIXME: Double check this. 
- if (RD == 0) - break; - if (PrimaryBaseWasVirtual && - BLayout.getVBaseClassOffset(RD) != Offset) - break; - D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString(), - LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint)); - subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint; - AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint; - } - } - - - void FinishGenerateVtable(const CXXRecordDecl *RD, - const ASTRecordLayout &Layout, - const CXXRecordDecl *PrimaryBase, - bool ForNPNVBases, bool WasPrimaryBase, - bool PrimaryBaseWasVirtual, - bool MorallyVirtual, int64_t Offset, - bool ForVirtualBase, int64_t CurrentVBaseOffset, - Path_t *Path) { - bool alloc = false; - if (Path == 0) { - alloc = true; - Path = new Path_t; - } - - StartNewTable(); - extra = 0; - Index_t AddressPoint = 0; - int VCallInsertionPoint = 0; - if (!ForNPNVBases || !WasPrimaryBase) { - bool DeferVCalls = MorallyVirtual || ForVirtualBase; - VCallInsertionPoint = VtableComponents.size(); - if (!DeferVCalls) { - insertVCalls(VCallInsertionPoint); - } else - // FIXME: just for extra, or for all uses of VCalls.size post this? - extra = -VCalls.size(); - - // Add the offset to top. - VtableComponents.push_back(BuildVtable ? wrap(-((Offset-LayoutOffset)/8)) : 0); - - // Add the RTTI information. - VtableComponents.push_back(rtti); - - AddressPoint = VtableComponents.size(); - - AppendMethodsToVtable(); - } - - // and then the non-virtual bases. - NonVirtualBases(RD, Layout, PrimaryBase, PrimaryBaseWasVirtual, - MorallyVirtual, Offset, CurrentVBaseOffset, Path); - - if (ForVirtualBase) { - // FIXME: We're adding to VCalls in callers, we need to do the overrides - // in the inner part, so that we know the complete set of vcalls during - // the build and don't have to insert into methods. Saving out the - // AddressPoint here, would need to be fixed, if we didn't do that. 
Also - // retroactively adding vcalls for overrides later wind up in the wrong - // place, the vcall slot has to be alloted during the walk of the base - // when the function is first introduces. - AddressPoint += VCalls.size(); - insertVCalls(VCallInsertionPoint); - } - - if (!ForNPNVBases || !WasPrimaryBase) - AddAddressPoints(RD, Offset, AddressPoint); - - if (alloc) { - delete Path; - } - } - - void Primaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset, - bool updateVBIndex, Index_t current_vbindex, - int64_t CurrentVBaseOffset) { - if (!RD->isDynamicClass()) - return; - - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual(); - - // vtables are composed from the chain of primaries. - if (PrimaryBase && !PrimaryBaseWasVirtual) { - D1(printf(" doing primaries for %s most derived %s\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString())); - Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset, - updateVBIndex, current_vbindex, CurrentVBaseOffset); - } - - D1(printf(" doing vcall entries for %s most derived %s\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString())); - - // And add the virtuals for the class to the primary vtable. - AddMethods(RD, MorallyVirtual, Offset, CurrentVBaseOffset); - } - - void VBPrimaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset, - bool updateVBIndex, Index_t current_vbindex, - bool RDisVirtualBase, int64_t CurrentVBaseOffset, - bool bottom) { - if (!RD->isDynamicClass()) - return; - - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual(); - - // vtables are composed from the chain of primaries. 
- if (PrimaryBase) { - int BaseCurrentVBaseOffset = CurrentVBaseOffset; - if (PrimaryBaseWasVirtual) { - IndirectPrimary.insert(PrimaryBase); - BaseCurrentVBaseOffset = BLayout.getVBaseClassOffset(PrimaryBase); - } - - D1(printf(" doing primaries for %s most derived %s\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString())); - - VBPrimaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset, - updateVBIndex, current_vbindex, PrimaryBaseWasVirtual, - BaseCurrentVBaseOffset, false); - } - - D1(printf(" doing vbase entries for %s most derived %s\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString())); - GenerateVBaseOffsets(RD, Offset, updateVBIndex, current_vbindex); - - if (RDisVirtualBase || bottom) { - Primaries(RD, MorallyVirtual, Offset, updateVBIndex, current_vbindex, - CurrentVBaseOffset); - } - } - - void GenerateVtableForBase(const CXXRecordDecl *RD, int64_t Offset = 0, - bool MorallyVirtual = false, - bool ForVirtualBase = false, - bool ForNPNVBases = false, - bool WasPrimaryBase = true, - int CurrentVBaseOffset = 0, - Path_t *Path = 0) { - if (!RD->isDynamicClass()) - return; - - // Construction vtable don't need parts that have no virtual bases and - // aren't morally virtual. 
- if ((LayoutClass != MostDerivedClass) && - RD->getNumVBases() == 0 && !MorallyVirtual) - return; - - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual(); - - extra = 0; - D1(printf("building entries for base %s most derived %s\n", - RD->getNameAsCString(), MostDerivedClass->getNameAsCString())); - - if (ForVirtualBase) - extra = VCalls.size(); - - if (!ForNPNVBases || !WasPrimaryBase) { - VBPrimaries(RD, MorallyVirtual, Offset, !ForVirtualBase, 0, - ForVirtualBase, CurrentVBaseOffset, true); - - if (Path) - OverrideMethods(Path, MorallyVirtual, Offset, CurrentVBaseOffset); - } - - FinishGenerateVtable(RD, Layout, PrimaryBase, ForNPNVBases, WasPrimaryBase, - PrimaryBaseWasVirtual, MorallyVirtual, Offset, - ForVirtualBase, CurrentVBaseOffset, Path); - } - - void GenerateVtableForVBases(const CXXRecordDecl *RD, - int64_t Offset = 0, - Path_t *Path = 0) { - bool alloc = false; - if (Path == 0) { - alloc = true; - Path = new Path_t; - } - // FIXME: We also need to override using all paths to a virtual base, - // right now, we just process the first path - Path->push_back(std::make_pair(RD, Offset)); - for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), - e = RD->bases_end(); i != e; ++i) { - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); - if (i->isVirtual() && !IndirectPrimary.count(Base)) { - // Mark it so we don't output it twice. 
- IndirectPrimary.insert(Base); - StartNewTable(); - VCall.clear(); - int64_t BaseOffset = BLayout.getVBaseClassOffset(Base); - int64_t CurrentVBaseOffset = BaseOffset; - D1(printf("vtable %s virtual base %s\n", - MostDerivedClass->getNameAsCString(), Base->getNameAsCString())); - GenerateVtableForBase(Base, BaseOffset, true, true, false, - true, CurrentVBaseOffset, Path); - } - int64_t BaseOffset; - if (i->isVirtual()) - BaseOffset = BLayout.getVBaseClassOffset(Base); - else { - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - BaseOffset = Offset + Layout.getBaseClassOffset(Base); - } - - if (Base->getNumVBases()) { - GenerateVtableForVBases(Base, BaseOffset, Path); - } - } - Path->pop_back(); - if (alloc) - delete Path; - } -}; -} // end anonymous namespace - -bool OldVtableBuilder::OverrideMethod(GlobalDecl GD, bool MorallyVirtual, - Index_t OverrideOffset, Index_t Offset, - int64_t CurrentVBaseOffset) { - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - - const bool isPure = MD->isPure(); - - // FIXME: Should OverrideOffset's be Offset? - - for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(), - e = MD->end_overridden_methods(); mi != e; ++mi) { - GlobalDecl OGD; - GlobalDecl OGD2; - - const CXXMethodDecl *OMD = *mi; - if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(OMD)) - OGD = GlobalDecl(DD, GD.getDtorType()); - else - OGD = OMD; - - // Check whether this is the method being overridden in this section of - // the vtable. - uint64_t Index; - if (!Methods.getIndex(OGD, Index)) - continue; - - OGD2 = OGD; - - // Get the original method, which we should be computing thunks, etc, - // against. 
- OGD = Methods.getOrigMethod(Index); - OMD = cast<CXXMethodDecl>(OGD.getDecl()); - - QualType ReturnType = - MD->getType()->getAs<FunctionType>()->getResultType(); - QualType OverriddenReturnType = - OMD->getType()->getAs<FunctionType>()->getResultType(); - - // Check if we need a return type adjustment. - if (!ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD, - OMD).isEmpty()) { - CanQualType &BaseReturnType = BaseReturnTypes[Index]; - - // Insert the base return type. - if (BaseReturnType.isNull()) - BaseReturnType = - CGM.getContext().getCanonicalType(OverriddenReturnType); - } - - Methods.OverrideMethod(OGD, GD); - - GlobalDecl UGD = getUnique(GD); - const CXXMethodDecl *UMD = cast<CXXMethodDecl>(UGD.getDecl()); - assert(UGD.getDecl() && "unique overrider not found"); - assert(UGD == getUnique(OGD) && "unique overrider not unique"); - - ThisAdjustments.erase(Index); - if (MorallyVirtual || VCall.count(UMD)) { - - Index_t &idx = VCall[UMD]; - if (idx == 0) { - VCallOffset[GD] = VCallOffset[OGD]; - // NonVirtualOffset[UMD] = CurrentVBaseOffset/8 - OverrideOffset/8; - NonVirtualOffset[UMD] = VCallOffset[OGD]; - VCallOffsetForVCall[UMD] = OverrideOffset/8; - idx = VCalls.size()+1; - VCalls.push_back(OverrideOffset/8 - CurrentVBaseOffset/8); - D1(printf(" vcall for %s at %d with delta %d most derived %s\n", - MD->getNameAsString().c_str(), (int)-idx-3, - (int)VCalls[idx-1], MostDerivedClass->getNameAsCString())); - } else { - VCallOffset[GD] = NonVirtualOffset[UMD]; - VCalls[idx-1] = -VCallOffsetForVCall[UGD] + OverrideOffset/8; - D1(printf(" vcall patch for %s at %d with delta %d most derived %s\n", - MD->getNameAsString().c_str(), (int)-idx-3, - (int)VCalls[idx-1], MostDerivedClass->getNameAsCString())); - } - int64_t NonVirtualAdjustment = -VCallOffset[OGD]; - QualType DerivedType = MD->getThisType(CGM.getContext()); - QualType BaseType = cast<const CXXMethodDecl>(OGD.getDecl())->getThisType(CGM.getContext()); - int64_t NonVirtualAdjustment2 = 
-(getNVOffset(BaseType, DerivedType)/8); - if (NonVirtualAdjustment2 != NonVirtualAdjustment) { - NonVirtualAdjustment = NonVirtualAdjustment2; - } - int64_t VirtualAdjustment = - -((idx + extra + 2) * LLVMPointerWidth / 8); - - // Optimize out virtual adjustments of 0. - if (VCalls[idx-1] == 0) - VirtualAdjustment = 0; - - ThunkAdjustment ThisAdjustment(NonVirtualAdjustment, - VirtualAdjustment); - - if (!isPure && !ThisAdjustment.isEmpty()) { - ThisAdjustments[Index] = ThisAdjustment; - SavedAdjustments.push_back( - std::make_pair(GD, std::make_pair(OGD, ThisAdjustment))); - } - return true; - } - - VCallOffset[GD] = VCallOffset[OGD2] - OverrideOffset/8; - - int64_t NonVirtualAdjustment = -VCallOffset[GD]; - QualType DerivedType = MD->getThisType(CGM.getContext()); - QualType BaseType = cast<const CXXMethodDecl>(OGD.getDecl())->getThisType(CGM.getContext()); - int64_t NonVirtualAdjustment2 = -(getNVOffset(BaseType, DerivedType)/8); - if (NonVirtualAdjustment2 != NonVirtualAdjustment) { - NonVirtualAdjustment = NonVirtualAdjustment2; - } - - if (NonVirtualAdjustment) { - ThunkAdjustment ThisAdjustment(NonVirtualAdjustment, 0); - - if (!isPure) { - ThisAdjustments[Index] = ThisAdjustment; - SavedAdjustments.push_back( - std::make_pair(GD, std::make_pair(OGD, ThisAdjustment))); - } - } - return true; - } - - return false; } - -void OldVtableBuilder::AppendMethodsToVtable() { - if (!BuildVtable) { - VtableComponents.insert(VtableComponents.end(), Methods.size(), - (llvm::Constant *)0); - ThisAdjustments.clear(); - BaseReturnTypes.clear(); - Methods.clear(); - return; - } - - // Reserve room in the vtable for our new methods. - VtableComponents.reserve(VtableComponents.size() + Methods.size()); - - for (unsigned i = 0, e = Methods.size(); i != e; ++i) { - GlobalDecl GD = Methods[i]; - const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); - - // Get the 'this' pointer adjustment. 
- ThunkAdjustment ThisAdjustment = ThisAdjustments.lookup(i); - - // Construct the return type adjustment. - ThunkAdjustment ReturnAdjustment; - - QualType BaseReturnType = BaseReturnTypes.lookup(i); - if (!BaseReturnType.isNull() && !MD->isPure()) { - QualType DerivedType = - MD->getType()->getAs<FunctionType>()->getResultType(); - - int64_t NonVirtualAdjustment = - getNVOffset(BaseReturnType, DerivedType) / 8; - - int64_t VirtualAdjustment = - getVbaseOffset(BaseReturnType, DerivedType); - - ReturnAdjustment = ThunkAdjustment(NonVirtualAdjustment, - VirtualAdjustment); - } - - llvm::Constant *Method = 0; - if (!ReturnAdjustment.isEmpty()) { - // Build a covariant thunk. - CovariantThunkAdjustment Adjustment(ThisAdjustment, ReturnAdjustment); - Method = wrap(CGM.GetAddrOfCovariantThunk(GD, Adjustment)); - } else if (!ThisAdjustment.isEmpty()) { - // Build a "regular" thunk. - Method = wrap(CGM.GetAddrOfThunk(GD, ThisAdjustment)); - } else if (MD->isPure()) { - // We have a pure virtual method. - Method = getPureVirtualFn(); - } else { - // We have a good old regular method. - Method = WrapAddrOf(GD); - } - - // Add the method to the vtable. 
- VtableComponents.push_back(Method); - } - - ThisAdjustments.clear(); - BaseReturnTypes.clear(); - - Methods.clear(); } -void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) { +void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) { // Itanium C++ ABI 2.5.2: // The order of the virtual function pointers in a virtual table is the @@ -3481,7 +2479,7 @@ void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) { NumVirtualFunctionPointers[RD] = CurrentIndex; } -uint64_t CGVtableInfo::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) { +uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) { llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I = NumVirtualFunctionPointers.find(RD); if (I != NumVirtualFunctionPointers.end()) @@ -3494,7 +2492,7 @@ uint64_t CGVtableInfo::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) { return I->second; } -uint64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) { +uint64_t CodeGenVTables::getMethodVtableIndex(GlobalDecl GD) { MethodVtableIndicesTy::iterator I = MethodVtableIndices.find(GD); if (I != MethodVtableIndices.end()) return I->second; @@ -3508,36 +2506,8 @@ uint64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) { return I->second; } -CGVtableInfo::AdjustmentVectorTy* -CGVtableInfo::getAdjustments(GlobalDecl GD) { - SavedAdjustmentsTy::iterator I = SavedAdjustments.find(GD); - if (I != SavedAdjustments.end()) - return &I->second; - - const CXXRecordDecl *RD = cast<CXXRecordDecl>(GD.getDecl()->getDeclContext()); - if (!SavedAdjustmentRecords.insert(RD).second) - return 0; - - AddressPointsMapTy AddressPoints; - OldVtableBuilder b(RD, RD, 0, CGM, false, AddressPoints); - D1(printf("vtable %s\n", RD->getNameAsCString())); - b.GenerateVtableForBase(RD); - b.GenerateVtableForVBases(RD); - - for (OldVtableBuilder::SavedAdjustmentsVectorTy::iterator - i = b.getSavedAdjustments().begin(), - e = b.getSavedAdjustments().end(); i != e; 
i++) - SavedAdjustments[i->first].push_back(i->second); - - I = SavedAdjustments.find(GD); - if (I != SavedAdjustments.end()) - return &I->second; - - return 0; -} - -int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, - const CXXRecordDecl *VBase) { +int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, + const CXXRecordDecl *VBase) { ClassPairTy ClassPair(RD, VBase); VirtualBaseClassOffsetOffsetsMapTy::iterator I = @@ -3549,7 +2519,6 @@ int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, BaseSubobject(RD, 0), /*BaseIsVirtual=*/false, /*OffsetInLayoutClass=*/0); - for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I = Builder.getVBaseOffsetOffsets().begin(), @@ -3573,128 +2542,588 @@ int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, return I->second; } -uint64_t CGVtableInfo::getVtableAddressPoint(const CXXRecordDecl *RD) { - uint64_t AddressPoint = - (*(*(CGM.getVtableInfo().AddressPoints[RD]))[RD])[std::make_pair(RD, 0)]; - +uint64_t +CodeGenVTables::getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD) { + uint64_t AddressPoint = AddressPoints.lookup(std::make_pair(RD, Base)); + assert(AddressPoint && "Address point must not be zero!"); + return AddressPoint; } -llvm::GlobalVariable * -CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage, - bool GenerateDefinition, - const CXXRecordDecl *LayoutClass, - const CXXRecordDecl *RD, uint64_t Offset, - bool IsVirtual, - AddressPointsMapTy& AddressPoints) { - if (GenerateDefinition) { - if (LayoutClass == RD) { - assert(!IsVirtual && - "Can only have a virtual base in construction vtables!"); - assert(!Offset && - "Can only have a base offset in construction vtables!"); - } +llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD, + const ThunkInfo &Thunk) { + const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); + + // Compute the mangled name. 
+ llvm::SmallString<256> Name; + if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD)) + getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), Thunk.This, + Name); + else + getMangleContext().mangleThunk(MD, Thunk, Name); + + const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD); + return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl()); +} + +static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF, + llvm::Value *Ptr, + int64_t NonVirtualAdjustment, + int64_t VirtualAdjustment) { + if (!NonVirtualAdjustment && !VirtualAdjustment) + return Ptr; + + const llvm::Type *Int8PtrTy = + llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + + llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy); + + if (NonVirtualAdjustment) { + // Do the non-virtual adjustment. + V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment); + } + + if (VirtualAdjustment) { + const llvm::Type *PtrDiffTy = + CGF.ConvertType(CGF.getContext().getPointerDiffType()); + + // Do the virtual adjustment. + llvm::Value *VTablePtrPtr = + CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo()); + + llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr); + + llvm::Value *OffsetPtr = + CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment); + + OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo()); + + // Load the adjustment offset from the vtable. + llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr); - VtableBuilder Builder(*this, RD, Offset, - /*MostDerivedClassIsVirtual=*/IsVirtual, - LayoutClass); + // Adjust our pointer. + V = CGF.Builder.CreateInBoundsGEP(V, Offset); + } + + // Cast back to the original type. 
+ return CGF.Builder.CreateBitCast(V, Ptr->getType()); +} + +void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD, + const ThunkInfo &Thunk) { + const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); + const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); + QualType ResultType = FPT->getResultType(); + QualType ThisType = MD->getThisType(getContext()); - if (CGM.getLangOptions().DumpVtableLayouts) - Builder.dumpLayout(llvm::errs()); + FunctionArgList FunctionArgs; + + // FIXME: It would be nice if more of this code could be shared with + // CodeGenFunction::GenerateCode. + + // Create the implicit 'this' parameter declaration. + CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, + MD->getLocation(), + &getContext().Idents.get("this"), + ThisType); + + // Add the 'this' parameter. + FunctionArgs.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType())); + + // Add the rest of the parameters. + for (FunctionDecl::param_const_iterator I = MD->param_begin(), + E = MD->param_end(); I != E; ++I) { + ParmVarDecl *Param = *I; + + FunctionArgs.push_back(std::make_pair(Param, Param->getType())); } + + StartFunction(GlobalDecl(), ResultType, Fn, FunctionArgs, SourceLocation()); - llvm::SmallString<256> OutName; - if (LayoutClass != RD) - CGM.getMangleContext().mangleCXXCtorVtable(LayoutClass, Offset / 8, - RD, OutName); - else - CGM.getMangleContext().mangleCXXVtable(RD, OutName); - llvm::StringRef Name = OutName.str(); + // Adjust the 'this' pointer if necessary. + llvm::Value *AdjustedThisPtr = + PerformTypeAdjustment(*this, LoadCXXThis(), + Thunk.This.NonVirtual, + Thunk.This.VCallOffsetOffset); + + CallArgList CallArgs; + + // Add our adjusted 'this' pointer. 
+ CallArgs.push_back(std::make_pair(RValue::get(AdjustedThisPtr), ThisType)); - llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name); - if (GV == 0 || CGM.getVtableInfo().AddressPoints[LayoutClass] == 0 || - GV->isDeclaration()) { - OldVtableBuilder b(RD, LayoutClass, Offset, CGM, GenerateDefinition, - AddressPoints); + // Add the rest of the parameters. + for (FunctionDecl::param_const_iterator I = MD->param_begin(), + E = MD->param_end(); I != E; ++I) { + ParmVarDecl *Param = *I; + QualType ArgType = Param->getType(); + + // FIXME: Declaring a DeclRefExpr on the stack is kinda icky. + DeclRefExpr ArgExpr(Param, ArgType.getNonReferenceType(), SourceLocation()); + CallArgs.push_back(std::make_pair(EmitCallArg(&ArgExpr, ArgType), ArgType)); + } - D1(printf("vtable %s\n", RD->getNameAsCString())); - // First comes the vtables for all the non-virtual bases... - b.GenerateVtableForBase(RD, Offset); + // Get our callee. + const llvm::Type *Ty = + CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), + FPT->isVariadic()); + llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty); - // then the vtables for all the virtual bases. - b.GenerateVtableForVBases(RD, Offset); + const CGFunctionInfo &FnInfo = + CGM.getTypes().getFunctionInfo(ResultType, CallArgs, + FPT->getExtInfo()); + + // Now emit our call. + RValue RV = EmitCall(FnInfo, Callee, ReturnValueSlot(), CallArgs, MD); + + if (!Thunk.Return.isEmpty()) { + // Emit the return adjustment. 
+ bool NullCheckValue = !ResultType->isReferenceType(); + + llvm::BasicBlock *AdjustNull = 0; + llvm::BasicBlock *AdjustNotNull = 0; + llvm::BasicBlock *AdjustEnd = 0; + + llvm::Value *ReturnValue = RV.getScalarVal(); - llvm::Constant *Init = 0; - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); - llvm::ArrayType *ArrayType = - llvm::ArrayType::get(Int8PtrTy, b.getVtableComponents().size()); - - if (GenerateDefinition) - Init = llvm::ConstantArray::get(ArrayType, &b.getVtableComponents()[0], - b.getVtableComponents().size()); - - llvm::GlobalVariable *OGV = GV; - - GV = new llvm::GlobalVariable(CGM.getModule(), ArrayType, - /*isConstant=*/true, Linkage, Init, Name); - CGM.setGlobalVisibility(GV, RD); - - if (OGV) { - GV->takeName(OGV); - llvm::Constant *NewPtr = - llvm::ConstantExpr::getBitCast(GV, OGV->getType()); - OGV->replaceAllUsesWith(NewPtr); - OGV->eraseFromParent(); + if (NullCheckValue) { + AdjustNull = createBasicBlock("adjust.null"); + AdjustNotNull = createBasicBlock("adjust.notnull"); + AdjustEnd = createBasicBlock("adjust.end"); + + llvm::Value *IsNull = Builder.CreateIsNull(ReturnValue); + Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull); + EmitBlock(AdjustNotNull); + } + + ReturnValue = PerformTypeAdjustment(*this, ReturnValue, + Thunk.Return.NonVirtual, + Thunk.Return.VBaseOffsetOffset); + + if (NullCheckValue) { + Builder.CreateBr(AdjustEnd); + EmitBlock(AdjustNull); + Builder.CreateBr(AdjustEnd); + EmitBlock(AdjustEnd); + + llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType()); + PHI->reserveOperandSpace(2); + PHI->addIncoming(ReturnValue, AdjustNotNull); + PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()), + AdjustNull); + ReturnValue = PHI; } + + RV = RValue::get(ReturnValue); } + + if (!ResultType->isVoidType()) + EmitReturnOfRValue(RV, ResultType); + + FinishFunction(); + + // Destroy the 'this' declaration. 
+ CXXThisDecl->Destroy(getContext()); - return GV; + // Set the right linkage. + Fn->setLinkage(CGM.getFunctionLinkage(MD)); + + // Set the right visibility. + CGM.setGlobalVisibility(Fn, MD); } -void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage, - const CXXRecordDecl *RD) { - llvm::GlobalVariable *&Vtable = Vtables[RD]; - if (Vtable) { - assert(Vtable->getInitializer() && "Vtable doesn't have a definition!"); +void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk) +{ + llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk); + const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); + + // Strip off a bitcast if we got one back. + if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) { + assert(CE->getOpcode() == llvm::Instruction::BitCast); + Entry = CE->getOperand(0); + } + + // There's already a declaration with the same name, check if it has the same + // type or if we need to replace it. + if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != + CGM.getTypes().GetFunctionTypeForVtable(MD)) { + llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry); + + // If the types mismatch then we have to rewrite the definition. + assert(OldThunkFn->isDeclaration() && + "Shouldn't replace non-declaration"); + + // Remove the name from the old thunk function and get a new thunk. + OldThunkFn->setName(llvm::StringRef()); + Entry = CGM.GetAddrOfThunk(GD, Thunk); + + // If needed, replace the old thunk with a bitcast. + if (!OldThunkFn->use_empty()) { + llvm::Constant *NewPtrForOldDecl = + llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType()); + OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl); + } + + // Remove the old thunk. + OldThunkFn->eraseFromParent(); + } + + // Actually generate the thunk body. 
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry); + CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk); +} + +void CodeGenVTables::EmitThunks(GlobalDecl GD) +{ + const CXXMethodDecl *MD = + cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl(); + + // We don't need to generate thunks for the base destructor. + if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) + return; + + const CXXRecordDecl *RD = MD->getParent(); + + // Compute VTable related info for this class. + ComputeVTableRelatedInformation(RD); + + ThunksMapTy::const_iterator I = Thunks.find(MD); + if (I == Thunks.end()) { + // We did not find a thunk for this method. return; } + + const ThunkInfoVectorTy &ThunkInfoVector = I->second; + for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I) + EmitThunk(GD, ThunkInfoVector[I]); +} + +void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) { + uint64_t *&LayoutData = VTableLayoutMap[RD]; - AddressPointsMapTy AddressPoints; - Vtable = GenerateVtable(Linkage, /*GenerateDefinition=*/true, RD, RD, 0, - /*IsVirtual=*/false, - AddressPoints); - GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD); + // Check if we've computed this information before. + if (LayoutData) + return; + + VtableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD); - for (CXXRecordDecl::method_iterator i = RD->method_begin(), - e = RD->method_end(); i != e; ++i) { - if (!(*i)->isVirtual()) - continue; - if(!(*i)->hasInlineBody() && !(*i)->isImplicit()) - continue; + // Add the VTable layout. + uint64_t NumVTableComponents = Builder.getNumVTableComponents(); + LayoutData = new uint64_t[NumVTableComponents + 1]; - if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(*i)) { - CGM.BuildThunksForVirtual(GlobalDecl(DD, Dtor_Complete)); - CGM.BuildThunksForVirtual(GlobalDecl(DD, Dtor_Deleting)); - } else { - CGM.BuildThunksForVirtual(GlobalDecl(*i)); + // Store the number of components. 
+ LayoutData[0] = NumVTableComponents; + + // Store the components. + std::copy(Builder.vtable_components_data_begin(), + Builder.vtable_components_data_end(), + &LayoutData[1]); + + // Add the known thunks. + Thunks.insert(Builder.thunks_begin(), Builder.thunks_end()); + + // Add the thunks needed in this vtable. + assert(!VTableThunksMap.count(RD) && + "Thunks already exists for this vtable!"); + + VTableThunksTy &VTableThunks = VTableThunksMap[RD]; + VTableThunks.append(Builder.vtable_thunks_begin(), + Builder.vtable_thunks_end()); + + // Sort them. + std::sort(VTableThunks.begin(), VTableThunks.end()); + + // Add the address points. + for (VtableBuilder::AddressPointsMapTy::const_iterator I = + Builder.address_points_begin(), E = Builder.address_points_end(); + I != E; ++I) { + + uint64_t &AddressPoint = AddressPoints[std::make_pair(RD, I->first)]; + + // Check if we already have the address points for this base. + assert(!AddressPoint && "Address point already exists for this base!"); + + AddressPoint = I->second; + } + + // If we don't have the vbase information for this class, insert it. + // getVirtualBaseOffsetOffset will compute it separately without computing + // the rest of the vtable related information. + if (!RD->getNumVBases()) + return; + + const RecordType *VBaseRT = + RD->vbases_begin()->getType()->getAs<RecordType>(); + const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl()); + + if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase))) + return; + + for (VtableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I = + Builder.getVBaseOffsetOffsets().begin(), + E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) { + // Insert all types. 
+ ClassPairTy ClassPair(RD, I->first); + + VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second)); + } +} + +llvm::Constant * +CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD, + const uint64_t *Components, + unsigned NumComponents, + const VTableThunksTy &VTableThunks) { + llvm::SmallVector<llvm::Constant *, 64> Inits; + + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); + + const llvm::Type *PtrDiffTy = + CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); + + QualType ClassType = CGM.getContext().getTagDeclType(RD); + llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType); + + unsigned NextVTableThunkIndex = 0; + + llvm::Constant* PureVirtualFn = 0; + + for (unsigned I = 0; I != NumComponents; ++I) { + VtableComponent Component = + VtableComponent::getFromOpaqueInteger(Components[I]); + + llvm::Constant *Init = 0; + + switch (Component.getKind()) { + case VtableComponent::CK_VCallOffset: + Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset()); + Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy); + break; + case VtableComponent::CK_VBaseOffset: + Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset()); + Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy); + break; + case VtableComponent::CK_OffsetToTop: + Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop()); + Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy); + break; + case VtableComponent::CK_RTTI: + Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy); + break; + case VtableComponent::CK_FunctionPointer: + case VtableComponent::CK_CompleteDtorPointer: + case VtableComponent::CK_DeletingDtorPointer: { + GlobalDecl GD; + + // Get the right global decl. 
+ switch (Component.getKind()) { + default: + llvm_unreachable("Unexpected vtable component kind"); + case VtableComponent::CK_FunctionPointer: + GD = Component.getFunctionDecl(); + break; + case VtableComponent::CK_CompleteDtorPointer: + GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete); + break; + case VtableComponent::CK_DeletingDtorPointer: + GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting); + break; + } + + if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) { + // We have a pure virtual member function. + if (!PureVirtualFn) { + const llvm::FunctionType *Ty = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), + /*isVarArg=*/false); + PureVirtualFn = + CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual"); + PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn, + Int8PtrTy); + } + + Init = PureVirtualFn; + } else { + // Check if we should use a thunk. + if (NextVTableThunkIndex < VTableThunks.size() && + VTableThunks[NextVTableThunkIndex].first == I) { + const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second; + + Init = CGM.GetAddrOfThunk(GD, Thunk); + + NextVTableThunkIndex++; + } else { + const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); + const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD); + + Init = CGM.GetAddrOfFunction(GD, Ty); + } + + Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy); + } + break; + } + + case VtableComponent::CK_UnusedFunctionPointer: + Init = llvm::ConstantExpr::getNullValue(Int8PtrTy); + break; + }; + + Inits.push_back(Init); + } + + llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents); + return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size()); +} + +/// GetGlobalVariable - Will return a global variable of the given type. +/// If a variable with a different type already exists then a new variable +/// with the right type will be created. 
+/// FIXME: We should move this to CodeGenModule and rename it to something +/// better and then use it in CGVTT and CGRTTI. +static llvm::GlobalVariable * +GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name, + const llvm::Type *Ty, + llvm::GlobalValue::LinkageTypes Linkage) { + + llvm::GlobalVariable *GV = Module.getNamedGlobal(Name); + llvm::GlobalVariable *OldGV = 0; + + if (GV) { + // Check if the variable has the right type. + if (GV->getType()->getElementType() == Ty) + return GV; + + assert(GV->isDeclaration() && "Declaration has wrong type!"); + + OldGV = GV; + } + + // Create a new variable. + GV = new llvm::GlobalVariable(Module, Ty, /*isConstant=*/true, + Linkage, 0, Name); + + if (OldGV) { + // Replace occurrences of the old variable if needed. + GV->takeName(OldGV); + + if (!OldGV->use_empty()) { + llvm::Constant *NewPtrForOldDecl = + llvm::ConstantExpr::getBitCast(GV, OldGV->getType()); + OldGV->replaceAllUsesWith(NewPtrForOldDecl); } + + OldGV->eraseFromParent(); } + + return GV; } -llvm::GlobalVariable *CGVtableInfo::getVtable(const CXXRecordDecl *RD) { - llvm::GlobalVariable *Vtable = Vtables.lookup(RD); +llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) { + llvm::SmallString<256> OutName; + CGM.getMangleContext().mangleCXXVtable(RD, OutName); + llvm::StringRef Name = OutName.str(); + + ComputeVTableRelatedInformation(RD); - if (!Vtable) { - AddressPointsMapTy AddressPoints; - Vtable = GenerateVtable(llvm::GlobalValue::ExternalLinkage, - /*GenerateDefinition=*/false, RD, RD, 0, - /*IsVirtual=*/false, AddressPoints); + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); + llvm::ArrayType *ArrayType = + llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD)); + + return GetGlobalVariable(CGM.getModule(), Name, ArrayType, + llvm::GlobalValue::ExternalLinkage); +} + +void +CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable, + llvm::GlobalVariable::LinkageTypes Linkage, 
+ const CXXRecordDecl *RD) { + // Dump the vtable layout if necessary. + if (CGM.getLangOptions().DumpVtableLayouts) { + VtableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD); + + Builder.dumpLayout(llvm::errs()); } - return Vtable; + assert(VTableThunksMap.count(RD) && + "No thunk status for this record decl!"); + + const VTableThunksTy& Thunks = VTableThunksMap[RD]; + + // Create and set the initializer. + llvm::Constant *Init = + CreateVTableInitializer(RD, getVTableComponentsData(RD), + getNumVTableComponents(RD), Thunks); + VTable->setInitializer(Init); + + // Set the correct linkage. + VTable->setLinkage(Linkage); } -void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) { +llvm::GlobalVariable * +CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD, + const BaseSubobject &Base, + bool BaseIsVirtual, + VTableAddressPointsMapTy& AddressPoints) { + VtableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(), + /*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD); + + // Dump the vtable layout if necessary. + if (CGM.getLangOptions().DumpVtableLayouts) + Builder.dumpLayout(llvm::errs()); + + // Add the address points. + AddressPoints.insert(Builder.address_points_begin(), + Builder.address_points_end()); + + // Get the mangled construction vtable name. + llvm::SmallString<256> OutName; + CGM.getMangleContext().mangleCXXCtorVtable(RD, Base.getBaseOffset() / 8, + Base.getBase(), OutName); + llvm::StringRef Name = OutName.str(); + + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); + llvm::ArrayType *ArrayType = + llvm::ArrayType::get(Int8PtrTy, Builder.getNumVTableComponents()); + + // Create the variable that will hold the construction vtable. + llvm::GlobalVariable *VTable = + GetGlobalVariable(CGM.getModule(), Name, ArrayType, + llvm::GlobalValue::InternalLinkage); + + // Add the thunks. 
+ VTableThunksTy VTableThunks; + VTableThunks.append(Builder.vtable_thunks_begin(), + Builder.vtable_thunks_end()); + + // Sort them. + std::sort(VTableThunks.begin(), VTableThunks.end()); + + // Create and set the initializer. + llvm::Constant *Init = + CreateVTableInitializer(Base.getBase(), + Builder.vtable_components_data_begin(), + Builder.getNumVTableComponents(), VTableThunks); + VTable->setInitializer(Init); + + return VTable; +} + +void +CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage, + const CXXRecordDecl *RD) { + llvm::GlobalVariable *&VTable = Vtables[RD]; + if (VTable) { + assert(VTable->getInitializer() && "Vtable doesn't have a definition!"); + return; + } + + VTable = GetAddrOfVTable(RD); + EmitVTableDefinition(VTable, Linkage, RD); + + GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD); +} + +void CodeGenVTables::EmitVTableRelatedData(GlobalDecl GD) { const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); const CXXRecordDecl *RD = MD->getParent(); @@ -3702,20 +3131,34 @@ void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) { if (!RD->isDynamicClass()) return; + // Check if we need to emit thunks for this function. + if (MD->isVirtual()) + EmitThunks(GD); + // Get the key function. const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD); + TemplateSpecializationKind RDKind = RD->getTemplateSpecializationKind(); + TemplateSpecializationKind MDKind = MD->getTemplateSpecializationKind(); + if (KeyFunction) { // We don't have the right key function. if (KeyFunction->getCanonicalDecl() != MD->getCanonicalDecl()) return; + } else { + // If this is an explicit instantiation of a method, we don't need a vtable. + // Since we have no key function, we will emit the vtable when we see + // a use, and just defining a function is not a use. 
+ if ((RDKind == TSK_ImplicitInstantiation || + RDKind == TSK_ExplicitInstantiationDeclaration) && + MDKind == TSK_ExplicitInstantiationDefinition) + return; } if (Vtables.count(RD)) return; - TemplateSpecializationKind kind = RD->getTemplateSpecializationKind(); - if (kind == TSK_ImplicitInstantiation) + if (RDKind == TSK_ImplicitInstantiation) CGM.DeferredVtables.push_back(RD); else GenerateClassData(CGM.getVtableLinkage(RD), RD); diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h index 5a146ab..6073555 100644 --- a/lib/CodeGen/CGVtable.h +++ b/lib/CodeGen/CGVtable.h @@ -15,7 +15,6 @@ #define CLANG_CODEGEN_CGVTABLE_H #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/DenseSet.h" #include "llvm/GlobalVariable.h" #include "GlobalDecl.h" @@ -25,41 +24,93 @@ namespace clang { namespace CodeGen { class CodeGenModule; -/// ThunkAdjustment - Virtual and non-virtual adjustment for thunks. -class ThunkAdjustment { -public: - ThunkAdjustment(int64_t NonVirtual, int64_t Virtual) - : NonVirtual(NonVirtual), - Virtual(Virtual) { } - - ThunkAdjustment() - : NonVirtual(0), Virtual(0) { } +/// ReturnAdjustment - A return adjustment. +struct ReturnAdjustment { + /// NonVirtual - The non-virtual adjustment from the derived object to its + /// nearest virtual base. + int64_t NonVirtual; + + /// VBaseOffsetOffset - The offset (in bytes), relative to the address point + /// of the virtual base class offset. + int64_t VBaseOffsetOffset; + + ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { } + + bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; } - // isEmpty - Return whether this thunk adjustment is empty. - bool isEmpty() const { - return NonVirtual == 0 && Virtual == 0; + friend bool operator==(const ReturnAdjustment &LHS, + const ReturnAdjustment &RHS) { + return LHS.NonVirtual == RHS.NonVirtual && + LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset; } - /// NonVirtual - The non-virtual adjustment. 
+ friend bool operator<(const ReturnAdjustment &LHS, + const ReturnAdjustment &RHS) { + if (LHS.NonVirtual < RHS.NonVirtual) + return true; + + return LHS.NonVirtual == RHS.NonVirtual && + LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset; + } +}; + +/// ThisAdjustment - A 'this' pointer adjustment. +struct ThisAdjustment { + /// NonVirtual - The non-virtual adjustment from the derived object to its + /// nearest virtual base. int64_t NonVirtual; - /// Virtual - The virtual adjustment. - int64_t Virtual; + /// VCallOffsetOffset - The offset (in bytes), relative to the address point, + /// of the virtual call offset. + int64_t VCallOffsetOffset; + + ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { } + + bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; } + + friend bool operator==(const ThisAdjustment &LHS, + const ThisAdjustment &RHS) { + return LHS.NonVirtual == RHS.NonVirtual && + LHS.VCallOffsetOffset == RHS.VCallOffsetOffset; + } + + friend bool operator<(const ThisAdjustment &LHS, + const ThisAdjustment &RHS) { + if (LHS.NonVirtual < RHS.NonVirtual) + return true; + + return LHS.NonVirtual == RHS.NonVirtual && + LHS.VCallOffsetOffset < RHS.VCallOffsetOffset; + } }; -/// CovariantThunkAdjustment - Adjustment of the 'this' pointer and the -/// return pointer for covariant thunks. -class CovariantThunkAdjustment { -public: - CovariantThunkAdjustment(const ThunkAdjustment &ThisAdjustment, - const ThunkAdjustment &ReturnAdjustment) - : ThisAdjustment(ThisAdjustment), ReturnAdjustment(ReturnAdjustment) { } +/// ThunkInfo - The 'this' pointer adjustment as well as an optional return +/// adjustment for a thunk. +struct ThunkInfo { + /// This - The 'this' pointer adjustment. + ThisAdjustment This; + + /// Return - The return adjustment. 
+ ReturnAdjustment Return; - CovariantThunkAdjustment() { } + ThunkInfo() { } - ThunkAdjustment ThisAdjustment; - ThunkAdjustment ReturnAdjustment; -}; + ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return) + : This(This), Return(Return) { } + + friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) { + return LHS.This == RHS.This && LHS.Return == RHS.Return; + } + + friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) { + if (LHS.This < RHS.This) + return true; + + return LHS.This == RHS.This && LHS.Return < RHS.Return; + } + + bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); } +}; // BaseSubobject - Uniquely identifies a direct or indirect base class. // Stores both the base class decl and the offset from the most derived class to @@ -126,19 +177,7 @@ template <> struct isPodLike<clang::CodeGen::BaseSubobject> { namespace clang { namespace CodeGen { -class CGVtableInfo { -public: - typedef std::vector<std::pair<GlobalDecl, ThunkAdjustment> > - AdjustmentVectorTy; - - typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t; - typedef llvm::DenseMap<CtorVtable_t, int64_t> AddrSubMap_t; - typedef llvm::DenseMap<const CXXRecordDecl *, AddrSubMap_t *> AddrMap_t; - llvm::DenseMap<const CXXRecordDecl *, AddrMap_t*> AddressPoints; - - typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy; - -private: +class CodeGenVTables { CodeGenModule &CGM; /// MethodVtableIndices - Contains the index (relative to the vtable address @@ -163,31 +202,97 @@ private: /// pointers in the vtable for a given record decl. 
llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers; - typedef llvm::DenseMap<GlobalDecl, AdjustmentVectorTy> SavedAdjustmentsTy; - SavedAdjustmentsTy SavedAdjustments; - llvm::DenseSet<const CXXRecordDecl*> SavedAdjustmentRecords; + typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy; + typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy; + + /// Thunks - Contains all thunks that a given method decl will need. + ThunksMapTy Thunks; + + typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t *> VTableLayoutMapTy; + + /// VTableLayoutMap - Stores the vtable layout for all record decls. + /// The layout is stored as an array of 64-bit integers, where the first + /// integer is the number of vtable entries in the layout, and the subsequent + /// integers are the vtable components. + VTableLayoutMapTy VTableLayoutMap; + + typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, + BaseSubobject>, uint64_t> AddressPointsMapTy; + + /// Address points - Address points for all vtables. + AddressPointsMapTy AddressPoints; + + /// VTableAddressPointsMapTy - Address points for a single vtable. + typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy; + + typedef llvm::SmallVector<std::pair<uint64_t, ThunkInfo>, 1> + VTableThunksTy; + + typedef llvm::DenseMap<const CXXRecordDecl *, VTableThunksTy> + VTableThunksMapTy; + + /// VTableThunksMap - Contains thunks needed by vtables. 
+ VTableThunksMapTy VTableThunksMap; + + uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const { + assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!"); + + return VTableLayoutMap.lookup(RD)[0]; + } + + const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const { + assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!"); + + uint64_t *Components = VTableLayoutMap.lookup(RD); + return &Components[1]; + } + + typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesMapTy; + + /// SubVTTIndicies - Contains indices into the various sub-VTTs. + SubVTTIndiciesMapTy SubVTTIndicies; + + + typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, + BaseSubobject>, uint64_t> + SecondaryVirtualPointerIndicesMapTy; - typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesTy; - SubVTTIndiciesTy SubVTTIndicies; + /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer + /// indices. + SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices; /// getNumVirtualFunctionPointers - Return the number of virtual function /// pointers in the vtable for a given record decl. uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD); void ComputeMethodVtableIndices(const CXXRecordDecl *RD); - - llvm::GlobalVariable * - GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage, - bool GenerateDefinition, const CXXRecordDecl *LayoutClass, - const CXXRecordDecl *RD, uint64_t Offset, bool IsVirtual, - AddressPointsMapTy& AddressPoints); llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage, bool GenerateDefinition, const CXXRecordDecl *RD); + /// EmitThunk - Emit a single thunk. + void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk); + + /// EmitThunks - Emit the associated thunks for the given global decl. 
+ void EmitThunks(GlobalDecl GD); + + /// ComputeVTableRelatedInformation - Compute and store all vtable related + /// information (vtable layout, vbase offset offsets, thunks etc) for the + /// given record decl. + void ComputeVTableRelatedInformation(const CXXRecordDecl *RD); + + /// CreateVTableInitializer - Create a vtable initializer for the given record + /// decl. + /// \param Components - The vtable components; this is really an array of + /// VTableComponents. + llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD, + const uint64_t *Components, + unsigned NumComponents, + const VTableThunksTy &VTableThunks); + public: - CGVtableInfo(CodeGenModule &CGM) + CodeGenVTables(CodeGenModule &CGM) : CGM(CGM) { } /// needsVTTParameter - Return whether the given global decl needs a VTT @@ -199,6 +304,11 @@ public: /// given record decl. uint64_t getSubVTTIndex(const CXXRecordDecl *RD, const CXXRecordDecl *Base); + /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the + /// virtual pointer for the given subobject is located. + uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, + BaseSubobject Base); + /// getMethodVtableIndex - Return the index (relative to the vtable address /// point) where the function pointer for the given virtual function is /// stored. @@ -212,35 +322,32 @@ public: int64_t getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, const CXXRecordDecl *VBase); - AdjustmentVectorTy *getAdjustments(GlobalDecl GD); - - /// getVtableAddressPoint - returns the address point of the vtable for the - /// given record decl. - /// FIXME: This should return a list of address points. - uint64_t getVtableAddressPoint(const CXXRecordDecl *RD); - - llvm::GlobalVariable *getVtable(const CXXRecordDecl *RD); - - /// CtorVtableInfo - Information about a constructor vtable. - struct CtorVtableInfo { - /// Vtable - The vtable itself. 
- llvm::GlobalVariable *Vtable; + /// getAddressPoint - Get the address point of the given subobject in the + /// class decl. + uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD); - /// AddressPoints - The address points in this constructor vtable. - AddressPointsMapTy AddressPoints; - - CtorVtableInfo() : Vtable(0) { } - }; + /// GetAddrOfVTable - Get the address of the vtable for the given record decl. + llvm::GlobalVariable *GetAddrOfVTable(const CXXRecordDecl *RD); + + /// EmitVTableDefinition - Emit the definition of the given vtable. + void EmitVTableDefinition(llvm::GlobalVariable *VTable, + llvm::GlobalVariable::LinkageTypes Linkage, + const CXXRecordDecl *RD); - CtorVtableInfo getCtorVtable(const CXXRecordDecl *RD, - const BaseSubobject &Base, - bool BaseIsVirtual); + /// GenerateConstructionVTable - Generate a construction vtable for the given + /// base subobject. + llvm::GlobalVariable * + GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base, + bool BaseIsVirtual, + VTableAddressPointsMapTy& AddressPoints); llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD); - void MaybeEmitVtable(GlobalDecl GD); + // EmitVTableRelatedData - Will emit any thunks that the global decl might + // have, as well as the vtable itself if the global decl is the key function. + void EmitVTableRelatedData(GlobalDecl GD); - /// GenerateClassData - Generate all the class data requires to be generated + /// GenerateClassData - Generate all the class data required to be generated /// upon definition of a KeyFunction. This includes the vtable, the /// rtti data structure and the VTT. 
/// diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp index f455827..b863aff 100644 --- a/lib/CodeGen/CodeGenFunction.cpp +++ b/lib/CodeGen/CodeGenFunction.cpp @@ -199,8 +199,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0, false, false, 0, 0, - /*FIXME?*/false, - /*FIXME?*/CC_Default); + /*FIXME?*/ + FunctionType::ExtInfo()); // Emit subprogram debug descriptor. if (CGDebugInfo *DI = getDebugInfo()) { @@ -211,7 +211,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // FIXME: Leaked. // CC info is ignored, hopefully? CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args, - CC_Default, false); + FunctionType::ExtInfo()); if (RetTy->isVoidType()) { // Void type; nothing to return. @@ -279,7 +279,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) { Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType())); // Check if we need a VTT parameter as well. - if (CGVtableInfo::needsVTTParameter(GD)) { + if (CodeGenVTables::needsVTTParameter(GD)) { // FIXME: The comment about using a fake decl above applies here too. QualType T = getContext().getPointerType(getContext().VoidPtrTy); CXXVTTDecl = diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h index bd12c4a..f21350d 100644 --- a/lib/CodeGen/CodeGenFunction.h +++ b/lib/CodeGen/CodeGenFunction.h @@ -22,7 +22,6 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/ValueHandle.h" -#include <map> #include "CodeGenModule.h" #include "CGBlocks.h" #include "CGBuilder.h" @@ -254,6 +253,27 @@ public: } }; + /// CXXTemporariesCleanupScope - Enters a new scope for catching live + /// temporaries, all of which will be popped once the scope is exited. 
+ class CXXTemporariesCleanupScope { + CodeGenFunction &CGF; + size_t NumLiveTemporaries; + + // DO NOT IMPLEMENT + CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &); + CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &); + + public: + explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF) + : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { } + + ~CXXTemporariesCleanupScope() { + while (CGF.LiveTemporaries.size() > NumLiveTemporaries) + CGF.PopCXXTemporary(); + } + }; + + /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup /// blocks that have been added. void EmitCleanupBlocks(size_t OldCleanupStackSize); @@ -504,30 +524,29 @@ public: /// legal to call this function even if there is no current insertion point. void FinishFunction(SourceLocation EndLoc=SourceLocation()); - /// DynamicTypeAdjust - Do the non-virtual and virtual adjustments on an - /// object pointer to alter the dynamic type of the pointer. Used by - /// GenerateCovariantThunk for building thunks. - llvm::Value *DynamicTypeAdjust(llvm::Value *V, - const ThunkAdjustment &Adjustment); - - /// GenerateThunk - Generate a thunk for the given method - llvm::Constant *GenerateThunk(llvm::Function *Fn, GlobalDecl GD, - bool Extern, - const ThunkAdjustment &ThisAdjustment); - llvm::Constant * - GenerateCovariantThunk(llvm::Function *Fn, GlobalDecl GD, - bool Extern, - const CovariantThunkAdjustment &Adjustment); - + /// GenerateThunk - Generate a thunk for the given method. + void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk); + void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type); - void InitializeVtablePtrs(const CXXRecordDecl *ClassDecl); + /// InitializeVTablePointer - Initialize the vtable pointer of the given + /// subobject. + /// + /// \param BaseIsMorallyVirtual - Whether the base subobject is a virtual base + /// or a direct or indirect base of a virtual base. 
+ void InitializeVTablePointer(BaseSubobject Base, bool BaseIsMorallyVirtual, + llvm::Constant *VTable, + const CXXRecordDecl *VTableClass); + + typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy; + void InitializeVTablePointers(BaseSubobject Base, bool BaseIsMorallyVirtual, + bool BaseIsNonVirtualPrimaryBase, + llvm::Constant *VTable, + const CXXRecordDecl *VTableClass, + VisitedVirtualBasesSetTy& VBases); + + void InitializeVTablePointers(const CXXRecordDecl *ClassDecl); - void InitializeVtablePtrsRecursive(const CXXRecordDecl *ClassDecl, - llvm::Constant *Vtable, - CGVtableInfo::AddrSubMap_t& AddressPoints, - llvm::Value *ThisPtr, - uint64_t Offset); void SynthesizeCXXCopyConstructor(const FunctionArgList &Args); void SynthesizeCXXCopyAssignment(const FunctionArgList &Args); @@ -1272,6 +1291,10 @@ public: /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll /// generate a branch around the created basic block as necessary. llvm::BasicBlock* getTrapBB(); + + /// EmitCallArg - Emit a single call argument. + RValue EmitCallArg(const Expr *E, QualType ArgType); + private: void EmitReturnOfRValue(RValue RV, QualType Ty); @@ -1303,9 +1326,6 @@ private: /// current cleanup scope. void AddBranchFixup(llvm::BranchInst *BI); - /// EmitCallArg - Emit a single call argument. - RValue EmitCallArg(const Expr *E, QualType ArgType); - /// EmitCallArgs - Emit call arguments for a function. /// The CallArgTypeInfo parameter is used for iterating over the known /// argument types of the function being called. 
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp index b4b5bbd..3c872c8 100644 --- a/lib/CodeGen/CodeGenModule.cpp +++ b/lib/CodeGen/CodeGenModule.cpp @@ -47,7 +47,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO, Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M), TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags), Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()), - MangleCtx(C), VtableInfo(*this), Runtime(0), + MangleCtx(C), VTables(*this), Runtime(0), MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0), VMContext(M.getContext()) { @@ -79,6 +79,7 @@ void CodeGenModule::createObjCRuntime() { } void CodeGenModule::Release() { + EmitFundamentalRTTIDescriptors(); EmitDeferred(); EmitCXXGlobalInitFunc(); EmitCXXGlobalDtorFunc(); @@ -495,7 +496,7 @@ void CodeGenModule::EmitDeferred() { if (!DeferredVtables.empty()) { const CXXRecordDecl *RD = DeferredVtables.back(); DeferredVtables.pop_back(); - getVtableInfo().GenerateClassData(getVtableLinkage(RD), RD); + getVTables().GenerateClassData(getVtableLinkage(RD), RD); continue; } @@ -714,20 +715,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) { Context.getSourceManager(), "Generating code for declaration"); - if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) { - getVtableInfo().MaybeEmitVtable(GD); - if (MD->isVirtual() && MD->isOutOfLine() && - (!isa<CXXDestructorDecl>(D) || GD.getDtorType() != Dtor_Base)) { - if (isa<CXXDestructorDecl>(D)) { - GlobalDecl CanonGD(cast<CXXDestructorDecl>(D->getCanonicalDecl()), - GD.getDtorType()); - BuildThunksForVirtual(CanonGD); - } else { - BuildThunksForVirtual(MD->getCanonicalDecl()); - } - } - } - + if (isa<CXXMethodDecl>(D)) + getVTables().EmitVTableRelatedData(GD); + if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D)) EmitCXXConstructor(CD, GD.getCtorType()); else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) @@ -758,7 +748,7 @@ 
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName, if (WeakRefReferences.count(Entry)) { const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl()); if (FD && !FD->hasAttr<WeakAttr>()) - Entry->setLinkage(llvm::Function::ExternalLinkage); + Entry->setLinkage(llvm::Function::ExternalLinkage); WeakRefReferences.erase(Entry); } @@ -873,7 +863,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName, if (Entry) { if (WeakRefReferences.count(Entry)) { if (D && !D->hasAttr<WeakAttr>()) - Entry->setLinkage(llvm::Function::ExternalLinkage); + Entry->setLinkage(llvm::Function::ExternalLinkage); WeakRefReferences.erase(Entry); } @@ -1255,9 +1245,9 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, if (!CI->use_empty()) CI->replaceAllUsesWith(NewCall); - // Copy any custom metadata attached with CI. - if (llvm::MDNode *DbgNode = CI->getMetadata("dbg")) - NewCall->setMetadata("dbg", DbgNode); + // Copy debug location attached to CI. + if (!CI->getDebugLoc().isUnknown()) + NewCall->setDebugLoc(CI->getDebugLoc()); CI->eraseFromParent(); } } diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h index febb856..3c57c0b 100644 --- a/lib/CodeGen/CodeGenModule.h +++ b/lib/CodeGen/CodeGenModule.h @@ -31,7 +31,6 @@ #include "llvm/ADT/StringSet.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/ValueHandle.h" -#include <list> namespace llvm { class Module; @@ -93,8 +92,8 @@ class CodeGenModule : public BlockModule { CodeGenTypes Types; MangleContext MangleCtx; - /// VtableInfo - Holds information about C++ vtables. - CGVtableInfo VtableInfo; + /// VTables - Holds information about C++ vtables. 
+ CodeGenVTables VTables; CGObjCRuntime* Runtime; CGDebugInfo* DebugInfo; @@ -181,7 +180,7 @@ public: llvm::Module &getModule() const { return TheModule; } CodeGenTypes &getTypes() { return Types; } MangleContext &getMangleContext() { return MangleCtx; } - CGVtableInfo &getVtableInfo() { return VtableInfo; } + CodeGenVTables &getVTables() { return VTables; } Diagnostic &getDiags() const { return Diags; } const llvm::TargetData &getTargetData() const { return TheTargetData; } llvm::LLVMContext &getLLVMContext() { return VMContext; } @@ -225,36 +224,18 @@ public: /// for the given type. llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty); - llvm::Constant *GetAddrOfThunk(GlobalDecl GD, - const ThunkAdjustment &ThisAdjustment); - llvm::Constant *GetAddrOfCovariantThunk(GlobalDecl GD, - const CovariantThunkAdjustment &ThisAdjustment); - void BuildThunksForVirtual(GlobalDecl GD); - void BuildThunksForVirtualRecursive(GlobalDecl GD, GlobalDecl BaseOGD); + /// GetAddrOfThunk - Get the address of the thunk for the given global decl. + llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk); /// GetWeakRefReference - Get a reference to the target of VD. llvm::Constant *GetWeakRefReference(const ValueDecl *VD); - /// BuildThunk - Build a thunk for the given method. - llvm::Constant *BuildThunk(GlobalDecl GD, bool Extern, - const ThunkAdjustment &ThisAdjustment); - - /// BuildCoVariantThunk - Build a thunk for the given method - llvm::Constant * - BuildCovariantThunk(const GlobalDecl &GD, bool Extern, - const CovariantThunkAdjustment &Adjustment); - /// GetNonVirtualBaseClassOffset - Returns the offset from a derived class to /// its base class. Returns null if the offset is 0. llvm::Constant * GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl); - /// ComputeThunkAdjustment - Returns the two parts required to compute the - /// offset for an object. 
- ThunkAdjustment ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl, - const CXXRecordDecl *BaseClassDecl); - /// GetStringForStringLiteral - Return the appropriate bytes for a string /// literal, properly padded to match the literal type. If only the address of /// a constant is needed consider using GetAddrOfConstantStringLiteral. @@ -523,6 +504,14 @@ private: void EmitAnnotations(void); + /// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the + /// given type. + void EmitFundamentalRTTIDescriptor(QualType Type); + + /// EmitFundamentalRTTIDescriptors - Emit the RTTI descriptors for the + /// builtin types. + void EmitFundamentalRTTIDescriptors(); + /// EmitDeferred - Emit any needed decls for which code generation /// was deferred. void EmitDeferred(void); diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp index 4feca4d..f53dd83 100644 --- a/lib/CodeGen/CodeGenTypes.cpp +++ b/lib/CodeGen/CodeGenTypes.cpp @@ -12,6 +12,8 @@ //===----------------------------------------------------------------------===// #include "CodeGenTypes.h" +#include "CGCall.h" +#include "CGRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclCXX.h" @@ -20,10 +22,6 @@ #include "llvm/DerivedTypes.h" #include "llvm/Module.h" #include "llvm/Target/TargetData.h" - -#include "CGCall.h" -#include "CGRecordLayoutBuilder.h" - using namespace clang; using namespace CodeGen; @@ -400,7 +398,6 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or /// enum. const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) { - // TagDecl's are not necessarily unique, instead use the (clang) // type connected to the decl. const Type *Key = @@ -449,7 +446,7 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) { } // Layout fields. 
- CGRecordLayout *Layout = CGRecordLayoutBuilder::ComputeLayout(*this, RD); + CGRecordLayout *Layout = ComputeRecordLayout(RD); CGRecordLayouts[Key] = Layout; const llvm::Type *ResultType = Layout->getLLVMType(); @@ -462,42 +459,11 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) { return ResultHolder.get(); } -/// getLLVMFieldNo - Return llvm::StructType element number -/// that corresponds to the field FD. -unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) { - assert(!FD->isBitField() && "Don't use getLLVMFieldNo on bit fields!"); - - llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD); - assert (I != FieldInfo.end() && "Unable to find field info"); - return I->second; -} - -/// addFieldInfo - Assign field number to field FD. -void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) { - FieldInfo[FD] = No; -} - -/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD. -CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) { - llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator - I = BitFields.find(FD); - assert (I != BitFields.end() && "Unable to find bitfield info"); - return I->second; -} - -/// addBitFieldInfo - Assign a start bit and a size to field FD. -void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, - unsigned Start, unsigned Size) { - BitFields.insert(std::make_pair(FD, BitFieldInfo(FieldNo, Start, Size))); -} - /// getCGRecordLayout - Return record layout info for the given llvm::Type. 
const CGRecordLayout & -CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const { +CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const { const Type *Key = Context.getTagDeclType(TD).getTypePtr(); - llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I - = CGRecordLayouts.find(Key); - assert (I != CGRecordLayouts.end() - && "Unable to find record layout information for type"); - return *I->second; + const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key); + assert(Layout && "Unable to find record layout information for type"); + return *Layout; } diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h index b2912ef..9b74106d 100644 --- a/lib/CodeGen/CodeGenTypes.h +++ b/lib/CodeGen/CodeGenTypes.h @@ -16,7 +16,6 @@ #include "llvm/Module.h" #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallSet.h" #include <vector> #include "CGCall.h" @@ -52,37 +51,9 @@ namespace clang { typedef CanQual<Type> CanQualType; namespace CodeGen { + class CGRecordLayout; class CodeGenTypes; - /// CGRecordLayout - This class handles struct and union layout info while - /// lowering AST types to LLVM types. - class CGRecordLayout { - CGRecordLayout(); // DO NOT IMPLEMENT - - /// LLVMType - The LLVMType corresponding to this record layout. - const llvm::Type *LLVMType; - - /// ContainsPointerToDataMember - Whether one of the fields in this record - /// layout is a pointer to data member, or a struct that contains pointer to - /// data member. - bool ContainsPointerToDataMember; - - public: - CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember) - : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) { } - - /// getLLVMType - Return llvm type associated with this record. - const llvm::Type *getLLVMType() const { - return LLVMType; - } - - /// containsPointerToDataMember - Whether this struct contains pointers to - /// data members. 
- bool containsPointerToDataMember() const { - return ContainsPointerToDataMember; - } - }; - /// CodeGenTypes - This class organizes the cross-module state that is used /// while lowering AST types to LLVM types. class CodeGenTypes { @@ -107,32 +78,12 @@ class CodeGenTypes { /// CGRecordLayouts - This maps llvm struct type with corresponding /// record layout info. - /// FIXME : If CGRecordLayout is less than 16 bytes then use - /// inline it in the map. llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts; - /// FieldInfo - This maps struct field with corresponding llvm struct type - /// field no. This info is populated by record organizer. - llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo; - /// FunctionInfos - Hold memoized CGFunctionInfo results. llvm::FoldingSet<CGFunctionInfo> FunctionInfos; -public: - struct BitFieldInfo { - BitFieldInfo(unsigned FieldNo, - unsigned Start, - unsigned Size) - : FieldNo(FieldNo), Start(Start), Size(Size) {} - - unsigned FieldNo; - unsigned Start; - unsigned Size; - }; - private: - llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields; - /// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder) /// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is /// used instead of llvm::Type because it allows us to bypass potential @@ -178,11 +129,7 @@ public: /// and/or incomplete argument types, this will return the opaque type. const llvm::Type *GetFunctionTypeForVtable(const CXXMethodDecl *MD); - const CGRecordLayout &getCGRecordLayout(const TagDecl*) const; - - /// getLLVMFieldNo - Return llvm::StructType element number - /// that corresponds to the field FD. - unsigned getLLVMFieldNo(const FieldDecl *FD); + const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const; /// UpdateCompletedType - When we find the full definition for a TagDecl, /// replace the 'opaque' type we previously made for it if applicable. 
@@ -202,7 +149,7 @@ public: const CGFunctionInfo &getFunctionInfo(const CallArgList &Args, const FunctionType *Ty) { return getFunctionInfo(Ty->getResultType(), Args, - Ty->getCallConv(), Ty->getNoReturnAttr()); + Ty->getExtInfo()); } const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty); const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty); @@ -216,33 +163,22 @@ public: /// specified, the "C" calling convention will be used. const CGFunctionInfo &getFunctionInfo(QualType ResTy, const CallArgList &Args, - CallingConv CC, - bool NoReturn); + const FunctionType::ExtInfo &Info); const CGFunctionInfo &getFunctionInfo(QualType ResTy, const FunctionArgList &Args, - CallingConv CC, - bool NoReturn); + const FunctionType::ExtInfo &Info); /// Retrieves the ABI information for the given function signature. /// /// \param ArgTys - must all actually be canonical as params const CGFunctionInfo &getFunctionInfo(CanQualType RetTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys, - CallingConv CC, - bool NoReturn); + const FunctionType::ExtInfo &Info); -public: // These are internal details of CGT that shouldn't be used externally. - /// addFieldInfo - Assign field number to field FD. - void addFieldInfo(const FieldDecl *FD, unsigned FieldNo); - - /// addBitFieldInfo - Assign a start bit and a size to field FD. - void addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, - unsigned Start, unsigned Size); - - /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field - /// FD. - BitFieldInfo getBitFieldInfo(const FieldDecl *FD); + /// \brief Compute a new LLVM record layout object for the given record. + CGRecordLayout *ComputeRecordLayout(const RecordDecl *D); +public: // These are internal details of CGT that shouldn't be used externally. /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or /// enum. 
const llvm::Type *ConvertTagDeclType(const TagDecl *TD); diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp index f2a73f1..077db7c 100644 --- a/lib/CodeGen/Mangle.cpp +++ b/lib/CodeGen/Mangle.cpp @@ -102,7 +102,7 @@ public: llvm::raw_svector_ostream &getStream() { return Out; } void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z"); - void mangleCallOffset(const ThunkAdjustment &Adjustment); + void mangleCallOffset(int64_t NonVirtual, int64_t Virtual); void mangleNumber(int64_t Number); void mangleFunctionEncoding(const FunctionDecl *FD); void mangleName(const NamedDecl *ND); @@ -439,23 +439,23 @@ void CXXNameMangler::mangleNumber(int64_t Number) { Out << Number; } -void CXXNameMangler::mangleCallOffset(const ThunkAdjustment &Adjustment) { +void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) { // <call-offset> ::= h <nv-offset> _ // ::= v <v-offset> _ // <nv-offset> ::= <offset number> # non-virtual base override // <v-offset> ::= <offset number> _ <virtual offset number> // # virtual base override, with vcall offset - if (!Adjustment.Virtual) { + if (!Virtual) { Out << 'h'; - mangleNumber(Adjustment.NonVirtual); + mangleNumber(NonVirtual); Out << '_'; return; } Out << 'v'; - mangleNumber(Adjustment.NonVirtual); + mangleNumber(NonVirtual); Out << '_'; - mangleNumber(Adjustment.Virtual); + mangleNumber(Virtual); Out << '_'; } @@ -1131,15 +1131,20 @@ void CXXNameMangler::mangleType(const ComplexType *T) { } // GNU extension: vector types +// <type> ::= <vector-type> +// <vector-type> ::= Dv <positive dimension number> _ <element type> +// ::= Dv [<dimension expression>] _ <element type> void CXXNameMangler::mangleType(const VectorType *T) { - Out << "U8__vector"; + Out << "Dv" << T->getNumElements() << '_'; mangleType(T->getElementType()); } void CXXNameMangler::mangleType(const ExtVectorType *T) { mangleType(static_cast<const VectorType*>(T)); } void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) { - Out << 
"U8__vector"; + Out << "Dv"; + mangleExpression(T->getSizeExpr()); + Out << '_'; mangleType(T->getElementType()); } @@ -1159,7 +1164,7 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) { mangleName(TD, T->getArgs(), T->getNumArgs()); } -void CXXNameMangler::mangleType(const TypenameType *T) { +void CXXNameMangler::mangleType(const DependentNameType *T) { // Typename types are always nested Out << 'N'; mangleUnresolvedScope(T->getQualifier()); @@ -1451,8 +1456,9 @@ void CXXNameMangler::mangleExpression(const Expr *E) { // It isn't clear that we ever actually want to have such a // nested-name-specifier; why not just represent it as a typename type? if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) { - QTy = getASTContext().getTypenameType(NNS->getPrefix(), - NNS->getAsIdentifier()) + QTy = getASTContext().getDependentNameType(ETK_Typename, + NNS->getPrefix(), + NNS->getAsIdentifier()) .getTypePtr(); } assert(QTy && "Qualifier was not type!"); @@ -1862,52 +1868,50 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, Mangler.mangle(D); } -/// \brief Mangles the a thunk with the offset n for the declaration D and -/// emits that name to the given output stream. 
-void MangleContext::mangleThunk(const FunctionDecl *FD, - const ThunkAdjustment &ThisAdjustment, +void MangleContext::mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, llvm::SmallVectorImpl<char> &Res) { - assert(!isa<CXXDestructorDecl>(FD) && - "Use mangleCXXDtor for destructor decls!"); - // <special-name> ::= T <call-offset> <base encoding> // # base is the nominal target function of thunk + // <special-name> ::= Tc <call-offset> <call-offset> <base encoding> + // # base is the nominal target function of thunk + // # first call-offset is 'this' adjustment + // # second call-offset is result adjustment + + assert(!isa<CXXDestructorDecl>(MD) && + "Use mangleCXXDtor for destructor decls!"); + CXXNameMangler Mangler(*this, Res); Mangler.getStream() << "_ZT"; - Mangler.mangleCallOffset(ThisAdjustment); - Mangler.mangleFunctionEncoding(FD); -} - -void MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *D, - CXXDtorType Type, - const ThunkAdjustment &ThisAdjustment, - llvm::SmallVectorImpl<char> &Res) { + if (!Thunk.Return.isEmpty()) + Mangler.getStream() << 'c'; + + // Mangle the 'this' pointer adjustment. + Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset); + + // Mangle the return pointer adjustment if there is one. 
+ if (!Thunk.Return.isEmpty()) + Mangler.mangleCallOffset(Thunk.Return.NonVirtual, + Thunk.Return.VBaseOffsetOffset); + + Mangler.mangleFunctionEncoding(MD); +} + +void +MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type, + const ThisAdjustment &ThisAdjustment, + llvm::SmallVectorImpl<char> &Res) { // <special-name> ::= T <call-offset> <base encoding> // # base is the nominal target function of thunk - CXXNameMangler Mangler(*this, Res, D, Type); + + CXXNameMangler Mangler(*this, Res, DD, Type); Mangler.getStream() << "_ZT"; - Mangler.mangleCallOffset(ThisAdjustment); - Mangler.mangleFunctionEncoding(D); -} -/// \brief Mangles the a covariant thunk for the declaration D and emits that -/// name to the given output stream. -void -MangleContext::mangleCovariantThunk(const FunctionDecl *FD, - const CovariantThunkAdjustment& Adjustment, - llvm::SmallVectorImpl<char> &Res) { - assert(!isa<CXXDestructorDecl>(FD) && - "No such thing as a covariant thunk for a destructor!"); + // Mangle the 'this' pointer adjustment. 
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual, + ThisAdjustment.VCallOffsetOffset); - // <special-name> ::= Tc <call-offset> <call-offset> <base encoding> - // # base is the nominal target function of thunk - // # first call-offset is 'this' adjustment - // # second call-offset is result adjustment - CXXNameMangler Mangler(*this, Res); - Mangler.getStream() << "_ZTc"; - Mangler.mangleCallOffset(Adjustment.ThisAdjustment); - Mangler.mangleCallOffset(Adjustment.ReturnAdjustment); - Mangler.mangleFunctionEncoding(FD); + Mangler.mangleFunctionEncoding(DD); } /// mangleGuardVariable - Returns the mangled name for a guard variable diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h index 62656b9..91a5e97 100644 --- a/lib/CodeGen/Mangle.h +++ b/lib/CodeGen/Mangle.h @@ -28,13 +28,14 @@ namespace clang { class ASTContext; class CXXConstructorDecl; class CXXDestructorDecl; + class CXXMethodDecl; class FunctionDecl; class NamedDecl; class VarDecl; namespace CodeGen { - class CovariantThunkAdjustment; - class ThunkAdjustment; + struct ThisAdjustment; + struct ThunkInfo; /// MangleBuffer - a convenient class for storing a name which is /// either the result of a mangling or is a constant string with @@ -91,15 +92,12 @@ public: bool shouldMangleDeclName(const NamedDecl *D); void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &); - void mangleThunk(const FunctionDecl *FD, - const ThunkAdjustment &ThisAdjustment, + void mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, llvm::SmallVectorImpl<char> &); - void mangleCXXDtorThunk(const CXXDestructorDecl *D, CXXDtorType Type, - const ThunkAdjustment &ThisAdjustment, + void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type, + const ThisAdjustment &ThisAdjustment, llvm::SmallVectorImpl<char> &); - void mangleCovariantThunk(const FunctionDecl *FD, - const CovariantThunkAdjustment& Adjustment, - llvm::SmallVectorImpl<char> &); void mangleGuardVariable(const VarDecl *D, 
llvm::SmallVectorImpl<char> &); void mangleCXXVtable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &); void mangleCXXVTT(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &); |