Diffstat (limited to 'contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp')
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp  123
1 file changed, 81 insertions(+), 42 deletions(-)
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index c46539b..2b9ba2c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -44,6 +44,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
@@ -58,11 +59,12 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -164,7 +166,7 @@ bool FastISel::hasTrivialKill(const Value *V) {
}
unsigned FastISel::getRegForValue(const Value *V) {
- EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
+ EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
return 0;
@@ -226,7 +228,7 @@ unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
if (!Reg) {
// Try to emit the constant by using an integer constant with a cast.
const APFloat &Flt = CF->getValueAPF();
- EVT IntVT = TLI.getPointerTy();
+ EVT IntVT = TLI.getPointerTy(DL);
uint64_t x[2];
uint32_t IntBitWidth = IntVT.getSizeInBits();
@@ -319,7 +321,7 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
bool IdxNIsKill = hasTrivialKill(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend it.
- MVT PtrVT = TLI.getPointerTy();
+ MVT PtrVT = TLI.getPointerTy(DL);
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT)) {
IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
@@ -424,7 +426,7 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
// Check if the second operand is a constant and handle it appropriately.
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t Imm = CI->getZExtValue();
+ uint64_t Imm = CI->getSExtValue();
// Transform "sdiv exact X, 8" -> "sra X, 3".
if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
@@ -491,7 +493,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
// FIXME: What's a good SWAG number for MaxOffs?
uint64_t MaxOffs = 2048;
Type *Ty = I->getOperand(0)->getType();
- MVT VT = TLI.getPointerTy();
+ MVT VT = TLI.getPointerTy(DL);
for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
E = I->op_end();
OI != E; ++OI) {
@@ -700,6 +702,15 @@ bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
return lowerCallTo(CLI);
}
+FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
+ const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
+ const char *Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
+ SmallString<32> MangledName;
+ Mangler::getNameWithPrefix(MangledName, Target, DL);
+ MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
+ return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
+}
+
bool FastISel::selectPatchpoint(const CallInst *I) {
// void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
// i32 <numBytes>,
@@ -710,7 +721,7 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
CallingConv::ID CC = I->getCallingConv();
bool IsAnyRegCC = CC == CallingConv::AnyReg;
bool HasDef = !I->getType()->isVoidTy();
- Value *Callee = I->getOperand(PatchPointOpers::TargetPos);
+ Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();
// Get the real number of arguments participating in the call <numArgs>
assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
@@ -756,23 +767,25 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));
- // Assume that the callee is a constant address or null pointer.
- // FIXME: handle function symbols in the future.
- uint64_t CalleeAddr;
- if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
- CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
- else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
- if (C->getOpcode() == Instruction::IntToPtr)
- CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
- else
+ // Add the call target.
+ if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
+ uint64_t CalleeConstAddr =
+ cast<ConstantInt>(C->getOperand(0))->getZExtValue();
+ Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
+ } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
+ if (C->getOpcode() == Instruction::IntToPtr) {
+ uint64_t CalleeConstAddr =
+ cast<ConstantInt>(C->getOperand(0))->getZExtValue();
+ Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
+ } else
llvm_unreachable("Unsupported ConstantExpr.");
+ } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
+ Ops.push_back(MachineOperand::CreateGA(GV, 0));
} else if (isa<ConstantPointerNull>(Callee))
- CalleeAddr = 0;
+ Ops.push_back(MachineOperand::CreateImm(0));
else
llvm_unreachable("Unsupported callee address.");
- Ops.push_back(MachineOperand::CreateImm(CalleeAddr));
-
// Adjust <numArgs> to account for any arguments that have been passed on
// the stack instead.
unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
@@ -801,7 +814,8 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
return false;
// Push the register mask info.
- Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC)));
+ Ops.push_back(MachineOperand::CreateRegMask(
+ TRI.getCallPreservedMask(*FuncInfo.MF, CC)));
// Add scratch registers as implicit def and early clobber.
const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
@@ -852,6 +866,15 @@ static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
unsigned NumArgs) {
+ MCContext &Ctx = MF->getContext();
+ SmallString<32> MangledName;
+ Mangler::getNameWithPrefix(MangledName, SymName, DL);
+ MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
+ return lowerCallTo(CI, Sym, NumArgs);
+}
+
+bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
+ unsigned NumArgs) {
ImmutableCallSite CS(CI);
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
@@ -876,7 +899,7 @@ bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
}
CallLoweringInfo CLI;
- CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);
+ CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);
return lowerCallTo(CLI);
}
@@ -885,10 +908,10 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
// Handle the incoming return values from the call.
CLI.clearIns();
SmallVector<EVT, 4> RetTys;
- ComputeValueVTs(TLI, CLI.RetTy, RetTys);
+ ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);
SmallVector<ISD::OutputArg, 4> Outs;
- GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);
+ GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);
bool CanLowerReturn = TLI.CanLowerReturn(
CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());
@@ -953,7 +976,7 @@ bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
// not there, but there are cases it cannot get right.
unsigned FrameAlign = Arg.Alignment;
if (!FrameAlign)
- FrameAlign = TLI.getByValTypeAlignment(ElementTy);
+ FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
Flags.setByValSize(FrameSize);
Flags.setByValAlign(FrameAlign);
}
@@ -1077,12 +1100,17 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
// The donothing intrinsic does, well, nothing.
case Intrinsic::donothing:
return true;
+ case Intrinsic::eh_actions: {
+ unsigned ResultReg = getRegForValue(UndefValue::get(II->getType()));
+ if (!ResultReg)
+ return false;
+ updateValueMap(II, ResultReg);
+ return true;
+ }
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
- DIVariable DIVar(DI->getVariable());
- assert((!DIVar || DIVar.isVariable()) &&
- "Variable in DbgDeclareInst should be either null or a DIVariable.");
- if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
+ assert(DI->getVariable() && "Missing variable");
+ if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
}
@@ -1122,6 +1150,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
false);
if (Op) {
+ assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+ "Expected inlined-at fields to agree");
if (Op->isReg()) {
Op->setIsDebug(true);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1146,6 +1176,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
const DbgValueInst *DI = cast<DbgValueInst>(II);
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
const Value *V = DI->getValue();
+ assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
+ "Expected inlined-at fields to agree");
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
@@ -1213,8 +1245,8 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
}
bool FastISel::selectCast(const User *I, unsigned Opcode) {
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(DL, I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
!DstVT.isSimple())
@@ -1256,8 +1288,8 @@ bool FastISel::selectBitCast(const User *I) {
}
// Bitcasts of other values become reg-reg copies or BITCAST operators.
- EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
- EVT DstEVT = TLI.getValueType(I->getType());
+ EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT DstEVT = TLI.getValueType(DL, I->getType());
if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
!TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
// Unhandled type. Halt "fast" selection and bail.
@@ -1318,7 +1350,7 @@ bool FastISel::selectInstruction(const Instruction *I) {
// Don't handle Intrinsic::trap if a trap funciton is specified.
if (F && F->getIntrinsicID() == Intrinsic::trap &&
- !TM.Options.getTrapFunctionName().empty())
+ Call->hasFnAttr("trap-func-name"))
return false;
}
@@ -1381,7 +1413,7 @@ bool FastISel::selectFNeg(const User *I) {
bool OpRegIsKill = hasTrivialKill(I);
// If the target has ISD::FNEG, use it.
- EVT VT = TLI.getValueType(I->getType());
+ EVT VT = TLI.getValueType(DL, I->getType());
unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
OpReg, OpRegIsKill);
if (ResultReg) {
@@ -1424,7 +1456,7 @@ bool FastISel::selectExtractValue(const User *U) {
// Make sure we only try to handle extracts with a legal result. But also
// allow i1 because it's easy.
- EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
+ EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
if (!RealVT.isSimple())
return false;
MVT VT = RealVT.getSimpleVT();
@@ -1448,7 +1480,7 @@ bool FastISel::selectExtractValue(const User *U) {
unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
SmallVector<EVT, 4> AggValueVTs;
- ComputeValueVTs(TLI, AggTy, AggValueVTs);
+ ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
for (unsigned i = 0; i < VTIndex; i++)
ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
@@ -1550,8 +1582,8 @@ bool FastISel::selectOperator(const User *I, unsigned Opcode) {
case Instruction::IntToPtr: // Deliberate fall-through.
case Instruction::PtrToInt: {
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- EVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(DL, I->getType());
if (DstVT.bitsGT(SrcVT))
return selectCast(I, ISD::ZERO_EXTEND);
if (DstVT.bitsLT(SrcVT))
@@ -1580,7 +1612,7 @@ FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
bool SkipTargetIndependentISel)
: FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
- TM(FuncInfo.MF->getTarget()), DL(*MF->getSubtarget().getDataLayout()),
+ TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
TII(*MF->getSubtarget().getInstrInfo()),
TLI(*MF->getSubtarget().getTargetLowering()),
TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
@@ -1662,6 +1694,7 @@ unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
if (ResultReg)
return ResultReg;
unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
+ bool IsImmKill = true;
if (!MaterialReg) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
@@ -1670,9 +1703,15 @@ unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
if (!MaterialReg)
return 0;
+ // FIXME: If the materialized register here has no uses yet then this
+ // will be the first use and we should be able to mark it as killed.
+ // However, the local value area for materialising constant expressions
+ // grows down, not up, which means that any constant expressions we generate
+ // later which also use 'Imm' could be after this instruction and therefore
+ // after this kill.
+ IsImmKill = false;
}
- return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg,
- /*IsKill=*/true);
+ return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
@@ -1998,7 +2037,7 @@ bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// own moves. Second, this check is necessary because FastISel doesn't
// use CreateRegs to create registers, so it always creates
// exactly one register for each non-void instruction.
- EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
+ EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Handle integer promotions, though, because they're common and easy.
if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
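
Taken together, the hunks above are largely a mechanical API update: FastISel's type queries (getValueType, getPointerTy, ComputeValueVTs, GetReturnInfo, getByValTypeAlignment) now take the DataLayout explicitly, the DL member is initialized from MF->getDataLayout() rather than the subtarget, and external callees are lowered through mangled MCSymbols instead of raw C-string names. The sketch below only illustrates that calling pattern, assuming LLVM headers of roughly the vintage this diff targets; hasSimpleType and getMangledSymbol are hypothetical helpers for illustration, not functions introduced by the patch.

#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

// Hypothetical helper: query a value's EVT the way the updated FastISel code
// does, passing the DataLayout explicitly instead of relying on a cached copy.
static bool hasSimpleType(const TargetLowering &TLI, const DataLayout &DL,
                          const Value *V) {
  EVT VT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  return VT.isSimple();
}

// Hypothetical helper mirroring the new CallLoweringInfo::setCallee overload:
// apply the platform name mangling and intern the result as an MCSymbol.
static MCSymbol *getMangledSymbol(MCContext &Ctx, const DataLayout &DL,
                                  const char *Name) {
  SmallString<32> Mangled;
  Mangler::getNameWithPrefix(Mangled, Name, DL);
  return Ctx.getOrCreateSymbol(Mangled);
}

Threading the DataLayout through each query, and taking it from MF->getDataLayout(), appears to track the broader move of DataLayout ownership from the target/subtarget to the module during this period.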