Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp')
-rw-r--r-- | contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp | 899
1 file changed, 174 insertions, 725 deletions
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 51d084e..f1b7286 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -29,25 +29,17 @@ using namespace clang;
 using namespace CodeGen;
 
 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
-  : BlockFunction(cgm, *this, Builder), CGM(cgm),
-    Target(CGM.getContext().Target),
-    Builder(cgm.getModule().getContext()),
+  : CodeGenTypeCache(cgm), CGM(cgm),
+    Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
+    BlockInfo(0), BlockPointer(0),
     NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
     ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
     SwitchInsn(0), CaseRangeBlock(0),
     DidCallStackSave(false), UnreachableBlock(0),
     CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
-    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+    OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
     TrapBB(0) {
-
-  // Get some frequently used types.
-  LLVMPointerWidth = Target.getPointerWidth(0);
-  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
-  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
-  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
-  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
-
-  Exceptions = getContext().getLangOptions().Exceptions;
+  CatchUndefined = getContext().getLangOptions().CatchUndefined;
   CGM.getCXXABI().getMangleContext().startNewFunction();
 }
@@ -125,7 +117,8 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
   // Emit function epilog (to return).
   EmitReturnBlock();
 
-  EmitFunctionInstrumentation("__cyg_profile_func_exit");
+  if (ShouldInstrumentFunction())
+    EmitFunctionInstrumentation("__cyg_profile_func_exit");
 
   // Emit debug descriptor for function end.
   if (CGDebugInfo *DI = getDebugInfo()) {
@@ -184,20 +177,16 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
 /// instrumentation function with the current function and the call site, if
 /// function instrumentation is enabled.
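The two instrumentation calls gated above are only half of the -finstrument-functions story; the other half is user code defining the hooks. A minimal sketch (the hook names and signatures are fixed by the GCC/Clang convention quoted in the next hunk; the fprintf payload is purely illustrative):

    #include <cstdio>

    // The hooks themselves must be exempt from instrumentation,
    // or every instrumented call would recurse into them.
    extern "C" __attribute__((no_instrument_function))
    void __cyg_profile_func_enter(void *this_fn, void *call_site) {
      std::fprintf(stderr, "enter %p (from %p)\n", this_fn, call_site);
    }

    extern "C" __attribute__((no_instrument_function))
    void __cyg_profile_func_exit(void *this_fn, void *call_site) {
      std::fprintf(stderr, "exit  %p (from %p)\n", this_fn, call_site);
    }

Code under test is compiled with -finstrument-functions and linked against these definitions; the CreateRuntimeFunction call below only declares the names and leaves resolution to the linker.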
 void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
-  if (!ShouldInstrumentFunction())
-    return;
-
   const llvm::PointerType *PointerTy;
   const llvm::FunctionType *FunctionTy;
   std::vector<const llvm::Type*> ProfileFuncArgs;
 
   // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
-  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
+  PointerTy = Int8PtrTy;
   ProfileFuncArgs.push_back(PointerTy);
   ProfileFuncArgs.push_back(PointerTy);
-  FunctionTy = llvm::FunctionType::get(
-    llvm::Type::getVoidTy(VMContext),
-    ProfileFuncArgs, false);
+  FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+                                       ProfileFuncArgs, false);
 
   llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
   llvm::CallInst *CallSite = Builder.CreateCall(
@@ -210,6 +199,15 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
                    CallSite);
 }
 
+void CodeGenFunction::EmitMCountInstrumentation() {
+  llvm::FunctionType *FTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);
+
+  llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
+                                                       Target.getMCountName());
+  Builder.CreateCall(MCountFn);
+}
+
 void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                     llvm::Function *Fn,
                                     const FunctionArgList &Args,
@@ -232,6 +230,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
       break;
   }
 
+  if (getContext().getLangOptions().OpenCL) {
+    // Add metadata for a kernel function.
+    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+      if (FD->hasAttr<OpenCLKernelAttr>()) {
+        llvm::LLVMContext &Context = getLLVMContext();
+        llvm::NamedMDNode *OpenCLMetadata =
+          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+
+        llvm::Value *Op = Fn;
+        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, &Op, 1));
+      }
+  }
+
   llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
 
   // Create a marker to make it easy to insert allocas into the entryblock
@@ -246,18 +257,23 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
 
   Builder.SetInsertPoint(EntryBB);
 
-  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
-                                                 false, false, 0, 0,
-                                                 /*FIXME?*/
-                                                 FunctionType::ExtInfo());
-
   // Emit subprogram debug descriptor.
   if (CGDebugInfo *DI = getDebugInfo()) {
+    // FIXME: what is going on here and why does it ignore all these
+    // interesting type properties?
+    QualType FnType =
+      getContext().getFunctionType(RetTy, 0, 0,
+                                   FunctionProtoType::ExtProtoInfo());
+
     DI->setLocation(StartLoc);
     DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
   }
 
-  EmitFunctionInstrumentation("__cyg_profile_func_enter");
+  if (ShouldInstrumentFunction())
+    EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+  if (CGM.getCodeGenOpts().InstrumentForProfiling)
+    EmitMCountInstrumentation();
 
   // FIXME: Leaked.
   // CC info is ignored, hopefully?
@@ -384,8 +400,7 @@ bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
     IgnoreCaseStmts = true;
 
   // Scan subexpressions for verboten labels.
-  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
-       I != E; ++I)
+  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;
 
@@ -442,13 +457,15 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
       // Emit the LHS as a conditional.  If the LHS conditional is false, we
       // want to jump to the FalseBlock.
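No boolean value for && is ever materialized on this path: each operand branches directly to a block. A sketch of the control flow this produces for "if (a && b)" (block names follow the createBasicBlock calls below; the C++ function merely models the CFG):

    //   entry:          br a, land.lhs.true, if.else
    //   land.lhs.true:  br b, if.then, if.else
    bool lowered_land(bool a, bool b) {
      if (!a) goto false_block;  // EmitBranchOnBoolExpr(LHS, LHSTrue, FalseBlock)
      if (!b) goto false_block;  // EmitBranchOnBoolExpr(RHS, TrueBlock, FalseBlock)
      return true;               // TrueBlock
    false_block:
      return false;              // FalseBlock
    }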
       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+
+      ConditionalEvaluation eval(*this);
       EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
       EmitBlock(LHSTrue);
 
       // Any temporaries created here are conditional.
-      BeginConditionalBranch();
+      eval.begin(*this);
       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
-      EndConditionalBranch();
+      eval.end(*this);
 
       return;
     } else if (CondBOp->getOpcode() == BO_LOr) {
@@ -469,13 +486,15 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
       // Emit the LHS as a conditional.  If the LHS conditional is true, we
       // want to jump to the TrueBlock.
       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+
+      ConditionalEvaluation eval(*this);
       EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
       EmitBlock(LHSFalse);
 
       // Any temporaries created here are conditional.
-      BeginConditionalBranch();
+      eval.begin(*this);
       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
-      EndConditionalBranch();
+      eval.end(*this);
 
       return;
     }
   }
 
@@ -495,11 +514,20 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+
+    ConditionalEvaluation cond(*this);
     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+
+    cond.begin(*this);
     EmitBlock(LHSBlock);
     EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+    cond.end(*this);
+
+    cond.begin(*this);
     EmitBlock(RHSBlock);
     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+    cond.end(*this);
+
     return;
   }
 }
@@ -516,6 +544,57 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
   CGM.ErrorUnsupported(S, Type, OmitOnError);
 }
 
+/// emitNonZeroVLAInit - Emit the "zero" initialization of a
+/// variable-length array whose elements have a non-zero bit-pattern.
+///
+/// \param src - a char* pointing to the bit-pattern for a single
+/// base element of the array
+/// \param sizeInChars - the total size of the VLA, in chars
+/// \param align - the total alignment of the VLA
+static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
+                               llvm::Value *dest, llvm::Value *src,
+                               llvm::Value *sizeInChars) {
+  std::pair<CharUnits,CharUnits> baseSizeAndAlign
+    = CGF.getContext().getTypeInfoInChars(baseType);
+
+  CGBuilderTy &Builder = CGF.Builder;
+
+  llvm::Value *baseSizeInChars
+    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+
+  const llvm::Type *i8p = Builder.getInt8PtrTy();
+
+  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
+  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
+
+  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
+  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
+  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
+
+  // Make a loop over the VLA.  C99 guarantees that the VLA element
+  // count must be nonzero.
+  CGF.EmitBlock(loopBB);
+
+  llvm::PHINode *cur = Builder.CreatePHI(i8p, "vla.cur");
+  cur->reserveOperandSpace(2);
+  cur->addIncoming(begin, originBB);
+
+  // memcpy the individual element bit-pattern.
+  Builder.CreateMemCpy(cur, src, baseSizeInChars,
+                       baseSizeAndAlign.second.getQuantity(),
+                       /*volatile*/ false);
+
+  // Go to the next element.
+  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");
+
+  // Leave if that's the end of the VLA.
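Before the loop's exit test is emitted below, the overall shape of the vla-init loop is easier to see in C terms. A sketch under the same assumptions the IR relies on (nonzero element count, total size a multiple of the element size; splat_init is a hypothetical name):

    #include <cstring>

    static void splat_init(char *dest, const char *src,
                           size_t totalBytes, size_t elemBytes) {
      char *cur = dest;                    // vla.begin / the vla.cur PHI
      char *end = dest + totalBytes;       // vla.end
      do {
        std::memcpy(cur, src, elemBytes);  // copy one element bit-pattern
        cur += elemBytes;                  // vla.next
      } while (cur != end);                // vla-init.isdone -> vla-init.cont
    }

The do/while form mirrors the IR exactly: the body is entered unconditionally, which is only safe because C99 forbids zero-length VLAs, as the comment above notes.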
+  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
+  Builder.CreateCondBr(done, contBB, loopBB);
+  cur->addIncoming(next, loopBB);
+
+  CGF.EmitBlock(contBB);
+}
+
 void
 CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
   // Ignore empty classes in C++.
@@ -529,26 +608,42 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
 
   // Cast the dest ptr to the appropriate i8 pointer type.
   unsigned DestAS =
     cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
-  const llvm::Type *BP =
-    llvm::Type::getInt8PtrTy(VMContext, DestAS);
+  const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
   if (DestPtr->getType() != BP)
     DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
 
   // Get size and alignment info for this aggregate.
   std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
-  uint64_t Size = TypeInfo.first;
-  unsigned Align = TypeInfo.second;
+  uint64_t Size = TypeInfo.first / 8;
+  unsigned Align = TypeInfo.second / 8;
 
-  // Don't bother emitting a zero-byte memset.
-  if (Size == 0)
-    return;
+  llvm::Value *SizeVal;
+  const VariableArrayType *vla;
 
-  llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
-  llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);
+  // Don't bother emitting a zero-byte memset.
+  if (Size == 0) {
+    // But note that getTypeInfo returns 0 for a VLA.
+    if (const VariableArrayType *vlaType =
+          dyn_cast_or_null<VariableArrayType>(
+                                          getContext().getAsArrayType(Ty))) {
+      SizeVal = GetVLASize(vlaType);
+      vla = vlaType;
+    } else {
+      return;
+    }
+  } else {
+    SizeVal = llvm::ConstantInt::get(IntPtrTy, Size);
+    vla = 0;
+  }
 
   // If the type contains a pointer to data member we can't memset it to zero.
   // Instead, create a null constant and copy it to the destination.
+  // TODO: there are other patterns besides zero that we can usefully memset,
+  // like -1, which happens to be the pattern used by member-pointers.
   if (!CGM.getTypes().isZeroInitializable(Ty)) {
+    // For a VLA, emit a single element, then splat that over the VLA.
+    if (vla) Ty = getContext().getBaseElementType(vla);
+
     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
 
     llvm::GlobalVariable *NullVariable =
@@ -559,27 +654,20 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
     llvm::Value *SrcPtr =
       Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
 
-    // FIXME: variable-size types?
+    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
 
     // Get and call the appropriate llvm.memcpy overload.
-    llvm::Constant *Memcpy =
-      CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
-    Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
-                        /*volatile*/ Builder.getFalse());
+    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align, false);
     return;
   }
 
   // Otherwise, just memset the whole thing to zero.  This is legal
   // because in LLVM, all default initializers (other than the ones we just
   // handled above) are guaranteed to have a bit pattern of all zeros.
-
-  // FIXME: Handle variable sized types.
-  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
-                      Builder.getInt8(0),
-                      SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
+  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, Align, false);
 }
 
-llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
   // Make sure that there is a block for the indirect goto.
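The isZeroInitializable special case in the hunks above exists because "null" is not always an all-zero bit-pattern. Under the Itanium C++ ABI a null pointer to data member is represented as -1 (the TODO in the diff alludes to exactly this), so zero-filling such an object would produce a non-null member pointer; a sketch:

    #include <cstring>

    struct S { int a, b; };
    struct Holder { int S::*field; };  // null is the bit-pattern -1; &S::a is 0

    void wrong() {
      Holder h;
      std::memset(&h, 0, sizeof h);    // h.field now denotes &S::a, not null
      // (h.field == nullptr) is false here, which is why CodeGen memcpys a
      // properly built null constant (or splats it over a VLA) instead.
    }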
   if (IndirectBranch == 0)
     GetIndirectGotoBlock();
 
@@ -597,8 +685,6 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
 
   CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
 
-  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
   // Create the PHI node that indirect gotos will add entries to.
   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
 
@@ -621,6 +707,9 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
   EnsureInsertPoint();
 
   if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+    // unknown size indication requires no size computation.
+    if (!VAT->getSizeExpr())
+      return 0;
     llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
 
     if (!SizeEntry) {
@@ -649,6 +738,11 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
     return 0;
   }
 
+  if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+    EmitVLASize(PT->getInnerType());
+    return 0;
+  }
+
   const PointerType *PT = Ty->getAs<PointerType>();
   assert(PT && "unknown VM type!");
   EmitVLASize(PT->getPointeeType());
@@ -656,686 +750,41 @@
 }
 
 llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
-  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
+  if (getContext().getBuiltinVaListType()->isArrayType())
     return EmitScalarExpr(E);
   return EmitLValue(E).getAddress();
 }
 
-/// Pops cleanup blocks until the given savepoint is reached.
-void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
-  assert(Old.isValid());
-
-  while (EHStack.stable_begin() != Old) {
-    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
-
-    // As long as Old strictly encloses the scope's enclosing normal
-    // cleanup, we're going to emit another normal cleanup which
-    // fallthrough can propagate through.
-    bool FallThroughIsBranchThrough =
-      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
-
-    PopCleanupBlock(FallThroughIsBranchThrough);
-  }
-}
-
-static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
-                                           EHCleanupScope &Scope) {
-  assert(Scope.isNormalCleanup());
-  llvm::BasicBlock *Entry = Scope.getNormalBlock();
-  if (!Entry) {
-    Entry = CGF.createBasicBlock("cleanup");
-    Scope.setNormalBlock(Entry);
-  }
-  return Entry;
-}
-
-static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
-                                       EHCleanupScope &Scope) {
-  assert(Scope.isEHCleanup());
-  llvm::BasicBlock *Entry = Scope.getEHBlock();
-  if (!Entry) {
-    Entry = CGF.createBasicBlock("eh.cleanup");
-    Scope.setEHBlock(Entry);
-  }
-  return Entry;
-}
-
-/// Transitions the terminator of the given exit-block of a cleanup to
-/// be a cleanup switch.
-static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
-                                                   llvm::BasicBlock *Block) {
-  // If it's a branch, turn it into a switch whose default
-  // destination is its original target.
-  llvm::TerminatorInst *Term = Block->getTerminator();
-  assert(Term && "can't transition block without terminator");
-
-  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
-    assert(Br->isUnconditional());
-    llvm::LoadInst *Load =
-      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
-    llvm::SwitchInst *Switch =
-      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
-    Br->eraseFromParent();
-    return Switch;
-  } else {
-    return cast<llvm::SwitchInst>(Term);
-  }
-}
-
-/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
-/// is basically llvm::MergeBlockIntoPredecessor, except
-/// simplified/optimized for the tighter constraints on cleanup blocks.
-///
-/// Returns the new block, whatever it is.
-static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
-                                              llvm::BasicBlock *Entry) {
-  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
-  if (!Pred) return Entry;
-
-  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
-  if (!Br || Br->isConditional()) return Entry;
-  assert(Br->getSuccessor(0) == Entry);
-
-  // If we were previously inserting at the end of the cleanup entry
-  // block, we'll need to continue inserting at the end of the
-  // predecessor.
-  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
-  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
-
-  // Kill the branch.
-  Br->eraseFromParent();
-
-  // Merge the blocks.
-  Pred->getInstList().splice(Pred->end(), Entry->getInstList());
-
-  // Kill the entry block.
-  Entry->eraseFromParent();
-
-  if (WasInsertBlock)
-    CGF.Builder.SetInsertPoint(Pred);
-
-  return Pred;
-}
-
-static void EmitCleanup(CodeGenFunction &CGF,
-                        EHScopeStack::Cleanup *Fn,
-                        bool ForEH) {
-  if (ForEH) CGF.EHStack.pushTerminate();
-  Fn->Emit(CGF, ForEH);
-  if (ForEH) CGF.EHStack.popTerminate();
-  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
-}
-
-/// Pops a cleanup block.  If the block includes a normal cleanup, the
-/// current insertion point is threaded through the cleanup, as are
-/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
-  assert(!EHStack.empty() && "cleanup stack is empty!");
-  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
-  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
-  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
-  assert(Scope.isActive() && "cleanup was still inactive when popped!");
-
-  // Check whether we need an EH cleanup.  This is only true if we've
-  // generated a lazy EH cleanup block.
-  bool RequiresEHCleanup = Scope.hasEHBranches();
-
-  // Check the three conditions which might require a normal cleanup:
-
-  // - whether there are branch fix-ups through this cleanup
-  unsigned FixupDepth = Scope.getFixupDepth();
-  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
-
-  // - whether there are branch-throughs or branch-afters
-  bool HasExistingBranches = Scope.hasBranches();
-
-  // - whether there's a fallthrough
-  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
-  bool HasFallthrough = (FallthroughSource != 0);
-
-  bool RequiresNormalCleanup = false;
-  if (Scope.isNormalCleanup() &&
-      (HasFixups || HasExistingBranches || HasFallthrough)) {
-    RequiresNormalCleanup = true;
-  }
-
-  // If we don't need the cleanup at all, we're done.
-  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
-    EHStack.popCleanup(); // safe because there are no fixups
-    assert(EHStack.getNumBranchFixups() == 0 ||
-           EHStack.hasNormalCleanups());
-    return;
-  }
-
-  // Copy the cleanup emission data out.  Note that SmallVector
-  // guarantees maximal alignment for its buffer regardless of its
-  // type parameter.
-  llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
-  CleanupBuffer.reserve(Scope.getCleanupSize());
-  memcpy(CleanupBuffer.data(),
-         Scope.getCleanupBuffer(), Scope.getCleanupSize());
-  CleanupBuffer.set_size(Scope.getCleanupSize());
-  EHScopeStack::Cleanup *Fn =
-    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
-
-  // We want to emit the EH cleanup after the normal cleanup, but go
-  // ahead and do the setup for the EH cleanup while the scope is still
-  // alive.
-  llvm::BasicBlock *EHEntry = 0;
-  llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
-  if (RequiresEHCleanup) {
-    EHEntry = CreateEHEntry(*this, Scope);
-
-    // Figure out the branch-through dest if necessary.
-    llvm::BasicBlock *EHBranchThroughDest = 0;
-    if (Scope.hasEHBranchThroughs()) {
-      assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
-      EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
-      EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
-    }
-
-    // If we have exactly one branch-after and no branch-throughs, we
-    // can dispatch it without a switch.
-    if (!Scope.hasEHBranchThroughs() &&
-        Scope.getNumEHBranchAfters() == 1) {
-      assert(!EHBranchThroughDest);
-
-      // TODO: remove the spurious eh.cleanup.dest stores if this edge
-      // never went through any switches.
-      llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
-      EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
-
-    // Otherwise, if we have any branch-afters, we need a switch.
-    } else if (Scope.getNumEHBranchAfters()) {
-      // The default of the switch belongs to the branch-throughs if
-      // they exist.
-      llvm::BasicBlock *Default =
-        (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
-
-      const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
-
-      llvm::LoadInst *Load =
-        new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
-      llvm::SwitchInst *Switch =
-        llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
-      EHInstsToAppend.push_back(Load);
-      EHInstsToAppend.push_back(Switch);
-
-      for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
-        Switch->addCase(Scope.getEHBranchAfterIndex(I),
-                        Scope.getEHBranchAfterBlock(I));
-
-    // Otherwise, we have only branch-throughs; jump to the next EH
-    // cleanup.
-    } else {
-      assert(EHBranchThroughDest);
-      EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
-    }
-  }
-
-  if (!RequiresNormalCleanup) {
-    EHStack.popCleanup();
-  } else {
-    // As a kindof crazy internal case, branch-through fall-throughs
-    // leave the insertion point set to the end of the last cleanup.
-    bool HasPrebranchedFallthrough =
-      (HasFallthrough && FallthroughSource->getTerminator());
-    assert(!HasPrebranchedFallthrough ||
-           FallthroughSource->getTerminator()->getSuccessor(0)
-             == Scope.getNormalBlock());
-
-    // If we have a fallthrough and no other need for the cleanup,
-    // emit it directly.
-    if (HasFallthrough && !HasPrebranchedFallthrough &&
-        !HasFixups && !HasExistingBranches) {
-
-      // Fixups can cause us to optimistically create a normal block,
-      // only to later have no real uses for it.  Just delete it in
-      // this case.
-      // TODO: we can potentially simplify all the uses after this.
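The general path that follows funnels every exit through a single emitted cleanup and then re-dispatches on an index stored in the cleanup.dest slot. Roughly what that buys, on a concrete function (Lock is a hypothetical RAII type; the IR in the comment is abbreviated):

    struct Lock { ~Lock() {} };  // user-provided dtor => a normal cleanup

    void example(bool early) {
      Lock guard;
      if (early) return;  // a branch-after: leaves through the cleanup
      // fall-through uses the reserved index 0 and resumes at cleanup.cont
    }

    //   store i32 1, i32* %cleanup.dest.slot   ; the "return" exit
    //   br label %cleanup                      ; ~Lock() is emitted once here
    //   ...
    //   switch i32 %cleanup.dest [ i32 0, label %cleanup.cont
    //                              i32 1, label %return ]

When there is exactly one branch-after and nothing else, the switch degenerates into the unconditional branch built in the single-branch-after case below.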
-      if (Scope.getNormalBlock()) {
-        Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
-        delete Scope.getNormalBlock();
-      }
-
-      EHStack.popCleanup();
-
-      EmitCleanup(*this, Fn, /*ForEH*/ false);
-
-    // Otherwise, the best approach is to thread everything through
-    // the cleanup block and then try to clean up after ourselves.
-    } else {
-      // Force the entry block to exist.
-      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
-
-      // If there's a fallthrough, we need to store the cleanup
-      // destination index.  For fall-throughs this is always zero.
-      if (HasFallthrough && !HasPrebranchedFallthrough)
-        Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
-
-      // Emit the entry block.  This implicitly branches to it if we
-      // have fallthrough.  All the fixups and existing branches should
-      // already be branched to it.
-      EmitBlock(NormalEntry);
-
-      bool HasEnclosingCleanups =
-        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
-
-      // Compute the branch-through dest if we need it:
-      //   - if there are branch-throughs threaded through the scope
-      //   - if fall-through is a branch-through
-      //   - if there are fixups that will be optimistically forwarded
-      //     to the enclosing cleanup
-      llvm::BasicBlock *BranchThroughDest = 0;
-      if (Scope.hasBranchThroughs() ||
-          (HasFallthrough && FallthroughIsBranchThrough) ||
-          (HasFixups && HasEnclosingCleanups)) {
-        assert(HasEnclosingCleanups);
-        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
-        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
-      }
-
-      llvm::BasicBlock *FallthroughDest = 0;
-      llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
-
-      // If there's exactly one branch-after and no other threads,
-      // we can route it without a switch.
-      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
-          Scope.getNumBranchAfters() == 1) {
-        assert(!BranchThroughDest);
-
-        // TODO: clean up the possibly dead stores to the cleanup dest slot.
-        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
-        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
-
-      // Build a switch-out if we need it:
-      //   - if there are branch-afters threaded through the scope
-      //   - if fall-through is a branch-after
-      //   - if there are fixups that have nowhere left to go and
-      //     so must be immediately resolved
-      } else if (Scope.getNumBranchAfters() ||
-                 (HasFallthrough && !FallthroughIsBranchThrough) ||
-                 (HasFixups && !HasEnclosingCleanups)) {
-
-        llvm::BasicBlock *Default =
-          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
-
-        // TODO: base this on the number of branch-afters and fixups
-        const unsigned SwitchCapacity = 10;
-
-        llvm::LoadInst *Load =
-          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
-        llvm::SwitchInst *Switch =
-          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
-        InstsToAppend.push_back(Load);
-        InstsToAppend.push_back(Switch);
-
-        // Branch-after fallthrough.
-        if (HasFallthrough && !FallthroughIsBranchThrough) {
-          FallthroughDest = createBasicBlock("cleanup.cont");
-          Switch->addCase(Builder.getInt32(0), FallthroughDest);
-        }
-
-        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
-          Switch->addCase(Scope.getBranchAfterIndex(I),
-                          Scope.getBranchAfterBlock(I));
-        }
-
-        if (HasFixups && !HasEnclosingCleanups)
-          ResolveAllBranchFixups(Switch);
-      } else {
-        // We should always have a branch-through destination in this case.
-        assert(BranchThroughDest);
-        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
-      }
-
-      // We're finally ready to pop the cleanup.
-      EHStack.popCleanup();
-      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
-
-      EmitCleanup(*this, Fn, /*ForEH*/ false);
-
-      // Append the prepared cleanup prologue from above.
-      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
-      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
-        NormalExit->getInstList().push_back(InstsToAppend[I]);
-
-      // Optimistically hope that any fixups will continue falling through.
-      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
-           I < E; ++I) {
-        BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
-        if (!Fixup.Destination) continue;
-        if (!Fixup.OptimisticBranchBlock) {
-          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
-                              getNormalCleanupDestSlot(),
-                              Fixup.InitialBranch);
-          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
-        }
-        Fixup.OptimisticBranchBlock = NormalExit;
-      }
-
-      if (FallthroughDest)
-        EmitBlock(FallthroughDest);
-      else if (!HasFallthrough)
-        Builder.ClearInsertionPoint();
-
-      // Check whether we can merge NormalEntry into a single predecessor.
-      // This might invalidate (non-IR) pointers to NormalEntry.
-      llvm::BasicBlock *NewNormalEntry =
-        SimplifyCleanupEntry(*this, NormalEntry);
-
-      // If it did invalidate those pointers, and NormalEntry was the same
-      // as NormalExit, go back and patch up the fixups.
-      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
-        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
-             I < E; ++I)
-          CGF.EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
-    }
-  }
-
-  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
-
-  // Emit the EH cleanup if required.
-  if (RequiresEHCleanup) {
-    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
-
-    EmitBlock(EHEntry);
-    EmitCleanup(*this, Fn, /*ForEH*/ true);
-
-    // Append the prepared cleanup prologue from above.
-    llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
-    for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
-      EHExit->getInstList().push_back(EHInstsToAppend[I]);
-
-    Builder.restoreIP(SavedIP);
-
-    SimplifyCleanupEntry(*this, EHEntry);
-  }
-}
-
-/// Terminate the current block by emitting a branch which might leave
-/// the current cleanup-protected scope.  The target scope may not yet
-/// be known, in which case this will require a fixup.
-///
-/// As a side-effect, this method clears the insertion point.
-void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
-  assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
-         && "stale jump destination");
-
-  if (!HaveInsertPoint())
-    return;
-
-  // Create the branch.
-  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
-  // Calculate the innermost active normal cleanup.
-  EHScopeStack::stable_iterator
-    TopCleanup = EHStack.getInnermostActiveNormalCleanup();
-
-  // If we're not in an active normal cleanup scope, or if the
-  // destination scope is within the innermost active normal cleanup
-  // scope, we don't need to worry about fixups.
-  if (TopCleanup == EHStack.stable_end() ||
-      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
-    Builder.ClearInsertionPoint();
-    return;
-  }
-
-  // If we can't resolve the destination cleanup scope, just add this
-  // to the current cleanup scope as a branch fixup.
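Concretely, a fixup arises when a branch has to leave a cleanup scope before its destination's scope depth can be resolved, e.g. a forward goto (Lock is again a hypothetical RAII type):

    struct Lock { ~Lock() {} };  // hypothetical RAII type

    void forward(bool b) {
      {
        Lock guard;
        // The destination's scope depth is not yet resolvable here, so a
        // BranchFixup records this branch as it leaves the cleanup scope.
        if (b) goto later;
      }
    later:
      // Once this block exists, ResolveBranchFixups() rewires the recorded
      // branch, adding a case to the cleanup's switch if one was built.
      return;
    }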
-  if (!Dest.getScopeDepth().isValid()) {
-    BranchFixup &Fixup = EHStack.addBranchFixup();
-    Fixup.Destination = Dest.getBlock();
-    Fixup.DestinationIndex = Dest.getDestIndex();
-    Fixup.InitialBranch = BI;
-    Fixup.OptimisticBranchBlock = 0;
-
-    Builder.ClearInsertionPoint();
-    return;
-  }
-
-  // Otherwise, thread through all the normal cleanups in scope.
-
-  // Store the index at the start.
-  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
-  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
-
-  // Adjust BI to point to the first cleanup block.
-  {
-    EHCleanupScope &Scope =
-      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
-    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
-  }
-
-  // Add this destination to all the scopes involved.
-  EHScopeStack::stable_iterator I = TopCleanup;
-  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
-  if (E.strictlyEncloses(I)) {
-    while (true) {
-      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
-      assert(Scope.isNormalCleanup());
-      I = Scope.getEnclosingNormalCleanup();
-
-      // If this is the last cleanup we're propagating through, tell it
-      // that there's a resolved jump moving through it.
-      if (!E.strictlyEncloses(I)) {
-        Scope.addBranchAfter(Index, Dest.getBlock());
-        break;
-      }
-
-      // Otherwise, tell the scope that there's a jump propoagating
-      // through it.  If this isn't new information, all the rest of
-      // the work has been done before.
-      if (!Scope.addBranchThrough(Dest.getBlock()))
-        break;
-    }
-  }
-
-  Builder.ClearInsertionPoint();
-}
-
-void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
-  // We should never get invalid scope depths for an UnwindDest; that
-  // implies that the destination wasn't set up correctly.
-  assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
-
-  if (!HaveInsertPoint())
-    return;
-
-  // Create the branch.
-  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
-  // Calculate the innermost active cleanup.
-  EHScopeStack::stable_iterator
-    InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
-
-  // If the destination is in the same EH cleanup scope as us, we
-  // don't need to thread through anything.
-  if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
-    Builder.ClearInsertionPoint();
-    return;
-  }
-  assert(InnermostCleanup != EHStack.stable_end());
-
-  // Store the index at the start.
-  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
-  new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
-
-  // Adjust BI to point to the first cleanup block.
-  {
-    EHCleanupScope &Scope =
-      cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
-    BI->setSuccessor(0, CreateEHEntry(*this, Scope));
-  }
-
-  // Add this destination to all the scopes involved.
-  for (EHScopeStack::stable_iterator
-         I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
-    assert(E.strictlyEncloses(I));
-    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
-    assert(Scope.isEHCleanup());
-    I = Scope.getEnclosingEHCleanup();
-
-    // If this is the last cleanup we're propagating through, add this
-    // as a branch-after.
-    if (I == E) {
-      Scope.addEHBranchAfter(Index, Dest.getBlock());
-      break;
-    }
-
-    // Otherwise, add it as a branch-through.  If this isn't new
-    // information, all the rest of the work has been done before.
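The loop this comment sits in distinguishes the scope a jump finally stops in (a branch-after) from the scopes it merely passes through (branch-throughs). In source terms (hypothetical RAII type again):

    struct Lock { ~Lock() {} };

    void nested() {
      {
        Lock outer;     // the jump's last cleanup: a branch-after records
        {               // 'done' and its destination index here
          Lock inner;   // merely crossed: a branch-through, whose cleanup
          goto done;    // forwards control on to outer's cleanup entry
        }
      }
    done:
      return;
    }

The same bookkeeping appears twice in the removed code, once for normal exits and once (just above) for EH cleanups; the EH variant dispatches on the eh.cleanup.dest.slot instead of the normal slot.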
-    if (!Scope.addEHBranchThrough(Dest.getBlock()))
-      break;
-  }
-
-  Builder.ClearInsertionPoint();
-}
-
-/// All the branch fixups on the EH stack have propagated out past the
-/// outermost normal cleanup; resolve them all by adding cases to the
-/// given switch instruction.
-void CodeGenFunction::ResolveAllBranchFixups(llvm::SwitchInst *Switch) {
-  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
-
-  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
-    // Skip this fixup if its destination isn't set or if we've
-    // already treated it.
-    BranchFixup &Fixup = EHStack.getBranchFixup(I);
-    if (Fixup.Destination == 0) continue;
-    if (!CasesAdded.insert(Fixup.Destination)) continue;
-
-    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex),
-                    Fixup.Destination);
-  }
-
-  EHStack.clearFixups();
-}
-
-void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
-  assert(Block && "resolving a null target block");
-  if (!EHStack.getNumBranchFixups()) return;
-
-  assert(EHStack.hasNormalCleanups() &&
-         "branch fixups exist with no normal cleanups on stack");
-
-  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
-  bool ResolvedAny = false;
-
-  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
-    // Skip this fixup if its destination doesn't match.
-    BranchFixup &Fixup = EHStack.getBranchFixup(I);
-    if (Fixup.Destination != Block) continue;
-
-    Fixup.Destination = 0;
-    ResolvedAny = true;
-
-    // If it doesn't have an optimistic branch block, LatestBranch is
-    // already pointing to the right place.
-    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
-    if (!BranchBB)
-      continue;
-
-    // Don't process the same optimistic branch block twice.
-    if (!ModifiedOptimisticBlocks.insert(BranchBB))
-      continue;
-
-    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
-
-    // Add a case to the switch.
-    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
-  }
-
-  if (ResolvedAny)
-    EHStack.popNullFixups();
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+                                              llvm::Constant *Init) {
+  assert (Init && "Invalid DeclRefExpr initializer!");
+  if (CGDebugInfo *Dbg = getDebugInfo())
+    Dbg->EmitGlobalVariable(E->getDecl(), Init);
 }
 
-/// Activate a cleanup that was created in an inactivated state.
-void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
-  assert(C != EHStack.stable_end() && "activating bottom of stack?");
-  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
-  assert(!Scope.isActive() && "double activation");
-
-  // Calculate whether the cleanup was used:
-  bool Used = false;
+CodeGenFunction::PeepholeProtection
+CodeGenFunction::protectFromPeepholes(RValue rvalue) {
+  // At the moment, the only aggressive peephole we do in IR gen
+  // is trunc(zext) folding, but if we add more, we can easily
+  // extend this protection.
 
-  // - as a normal cleanup
-  if (Scope.isNormalCleanup()) {
-    bool NormalUsed = false;
-    if (Scope.getNormalBlock()) {
-      NormalUsed = true;
-    } else {
-      // Check whether any enclosed cleanups were needed.
-      for (EHScopeStack::stable_iterator
-             I = EHStack.getInnermostNormalCleanup(); I != C; ) {
-        assert(C.strictlyEncloses(I));
-        EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
-        if (S.getNormalBlock()) {
-          NormalUsed = true;
-          break;
-        }
-        I = S.getEnclosingNormalCleanup();
-      }
-    }
+  if (!rvalue.isScalar()) return PeepholeProtection();
+  llvm::Value *value = rvalue.getScalarVal();
+  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
 
-    if (NormalUsed)
-      Used = true;
-    else
-      Scope.setActivatedBeforeNormalUse(true);
-  }
+  // Just make an extra bitcast.
+  assert(HaveInsertPoint());
+  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
+                                                  Builder.GetInsertBlock());
 
-  // - as an EH cleanup
-  if (Scope.isEHCleanup()) {
-    bool EHUsed = false;
-    if (Scope.getEHBlock()) {
-      EHUsed = true;
-    } else {
-      // Check whether any enclosed cleanups were needed.
-      for (EHScopeStack::stable_iterator
-             I = EHStack.getInnermostEHCleanup(); I != C; ) {
-        assert(C.strictlyEncloses(I));
-        EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
-        if (S.getEHBlock()) {
-          EHUsed = true;
-          break;
-        }
-        I = S.getEnclosingEHCleanup();
-      }
-    }
-
-    if (EHUsed)
-      Used = true;
-    else
-      Scope.setActivatedBeforeEHUse(true);
-  }
-
-  llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
-  if (Used) {
-    Var = CreateTempAlloca(Builder.getInt1Ty());
-    InitTempAlloca(Var, Builder.getFalse());
-  }
-  Scope.setActiveVar(Var);
+  PeepholeProtection protection;
+  protection.Inst = inst;
+  return protection;
 }
 
-llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
-  if (!NormalCleanupDest)
-    NormalCleanupDest =
-      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
-  return NormalCleanupDest;
-}
+void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
+  if (!protection.Inst) return;
 
-llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
-  if (!EHCleanupDest)
-    EHCleanupDest =
-      CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
-  return EHCleanupDest;
-}
-
-void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
-                                              llvm::ConstantInt *Init) {
-  assert (Init && "Invalid DeclRefExpr initializer!");
-  if (CGDebugInfo *Dbg = getDebugInfo())
-    Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
+  // In theory, we could try to duplicate the peepholes now, but whatever.
+  protection.Inst->eraseFromParent();
 }
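protectFromPeepholes leans on one subtle LLVM detail: IRBuilder folds a same-type cast away (CreateBitCast would simply return its operand), so the no-op bitcast has to be constructed directly, exactly as the added code does. The bitcast is an artificial use of the zext, which is presumably what keeps the trunc(zext) peephole from zapping the instruction while the protected value is held across other emission. A standalone sketch of the trick (header paths are modern LLVM; in this era's tree the same classes lived under llvm/ and llvm/Support/):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"

    // Shield a zext result from later zext-folding peepholes by
    // interposing a no-op bitcast that counts as an extra use.
    llvm::Value *protect(llvm::IRBuilder<> &B, llvm::Value *v) {
      if (!llvm::isa<llvm::ZExtInst>(v))
        return v;  // only zext results are at risk of trunc(zext x) -> x
      // Not B.CreateBitCast(v, v->getType()): IRBuilder folds a
      // same-type cast to v itself, so create the instruction directly.
      return new llvm::BitCastInst(v, v->getType(), "",
                                   B.GetInsertBlock());
    }

Undoing the protection is then just erasing the bitcast, which is all unprotectFromPeepholes does.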