summary | refs | log | tree | commit | diff | stats
path: root/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm/lib/Target/Mips/MipsFastISel.cpp')
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsFastISel.cpp  241
1 file changed, 212 insertions(+), 29 deletions(-)
diff --git a/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp b/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp
index 19c201d..a44192f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -31,6 +31,9 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "mips-fastisel"
using namespace llvm;
@@ -95,10 +98,10 @@ class MipsFastISel final : public FastISel {
// Convenience variables to avoid some queries.
LLVMContext *Context;
+ bool fastLowerArguments() override;
bool fastLowerCall(CallLoweringInfo &CLI) override;
bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
- bool TargetSupported;
bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
// floating point but not reject doing fast-isel in other
// situations
@@ -195,6 +198,9 @@ private:
bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
unsigned &NumBytes);
bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
+ const MipsABIInfo &getABI() const {
+ return static_cast<const MipsTargetMachine &>(TM).getABI();
+ }
public:
// Backend specific FastISel code.
@@ -205,12 +211,7 @@ public:
TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
Context = &funcInfo.Fn->getContext();
- bool ISASupported = !Subtarget->hasMips32r6() &&
- !Subtarget->inMicroMipsMode() && Subtarget->hasMips32();
- TargetSupported =
- ISASupported && TM.isPositionIndependent() &&
- (static_cast<const MipsTargetMachine &>(TM).getABI().IsO32());
- UnsupportedFPMode = Subtarget->isFP64bit();
+ UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
}
unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
@@ -285,9 +286,6 @@ unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
}
unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
- if (!TargetSupported)
- return 0;
-
assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
"Alloca should always return a pointer.");
@@ -398,9 +396,6 @@ unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
// Materialize a constant into a register, and return the register
// number (or zero if we failed to handle it).
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
- if (!TargetSupported)
- return 0;
-
EVT CEVT = TLI.getValueType(DL, C->getType(), true);
// Only handle simple types.
@@ -443,14 +438,14 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
}
case Instruction::GetElementPtr: {
Address SavedAddr = Addr;
- uint64_t TmpOffset = Addr.getOffset();
+ int64_t TmpOffset = Addr.getOffset();
// Iterate through the GEP folding the constants into offsets where
// we can.
gep_type_iterator GTI = gep_type_begin(U);
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
@@ -703,8 +698,8 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
- emitInst(Opc).addReg(LeftReg).addReg(RightReg).addReg(
- Mips::FCC0, RegState::ImplicitDefine);
+ emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
+ .addReg(RightReg);
emitInst(CondMovOpc, ResultReg)
.addReg(RegWithOne)
.addReg(Mips::FCC0)
@@ -761,8 +756,8 @@ bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
if (Addr.isFIBase()) {
unsigned FI = Addr.getFI();
unsigned Align = 4;
- unsigned Offset = Addr.getOffset();
- MachineFrameInfo &MFI = *MF->getFrameInfo();
+ int64_t Offset = Addr.getOffset();
+ MachineFrameInfo &MFI = MF->getFrameInfo();
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), Align);
@@ -812,8 +807,8 @@ bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
if (Addr.isFIBase()) {
unsigned FI = Addr.getFI();
unsigned Align = 4;
- unsigned Offset = Addr.getOffset();
- MachineFrameInfo &MFI = *MF->getFrameInfo();
+ int64_t Offset = Addr.getOffset();
+ MachineFrameInfo &MFI = MF->getFrameInfo();
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), Align);
@@ -970,9 +965,13 @@ bool MipsFastISel::selectFPExt(const Instruction *I) {
bool MipsFastISel::selectSelect(const Instruction *I) {
assert(isa<SelectInst>(I) && "Expected a select instruction.");
+ DEBUG(dbgs() << "selectSelect\n");
+
MVT VT;
- if (!isTypeSupported(I->getType(), VT))
+ if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
+ DEBUG(dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
return false;
+ }
unsigned CondMovOpc;
const TargetRegisterClass *RC;
@@ -1249,10 +1248,191 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
return true;
}
-bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
- if (!TargetSupported)
+bool MipsFastISel::fastLowerArguments() {
+ DEBUG(dbgs() << "fastLowerArguments\n");
+
+ if (!FuncInfo.CanLowerReturn) {
+ DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
return false;
+ }
+
+ const Function *F = FuncInfo.Fn;
+ if (F->isVarArg()) {
+ DEBUG(dbgs() << ".. gave up (varargs)\n");
+ return false;
+ }
+
+ CallingConv::ID CC = F->getCallingConv();
+ if (CC != CallingConv::C) {
+ DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
+ return false;
+ }
+
+ const ArrayRef<MCPhysReg> GPR32ArgRegs = {Mips::A0, Mips::A1, Mips::A2,
+ Mips::A3};
+ const ArrayRef<MCPhysReg> FGR32ArgRegs = {Mips::F12, Mips::F14};
+ const ArrayRef<MCPhysReg> AFGR64ArgRegs = {Mips::D6, Mips::D7};
+ ArrayRef<MCPhysReg>::iterator NextGPR32 = GPR32ArgRegs.begin();
+ ArrayRef<MCPhysReg>::iterator NextFGR32 = FGR32ArgRegs.begin();
+ ArrayRef<MCPhysReg>::iterator NextAFGR64 = AFGR64ArgRegs.begin();
+
+ struct AllocatedReg {
+ const TargetRegisterClass *RC;
+ unsigned Reg;
+ AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
+ : RC(RC), Reg(Reg) {}
+ };
+
+ // Only handle simple cases. i.e. All arguments are directly mapped to
+ // registers of the appropriate type.
+ SmallVector<AllocatedReg, 4> Allocation;
+ unsigned Idx = 1;
+ for (const auto &FormalArg : F->args()) {
+ if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
+ DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
+ return false;
+ }
+
+ Type *ArgTy = FormalArg.getType();
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
+ DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
+ return false;
+ }
+
+ EVT ArgVT = TLI.getValueType(DL, ArgTy);
+ DEBUG(dbgs() << ".. " << (Idx - 1) << ": " << ArgVT.getEVTString() << "\n");
+ if (!ArgVT.isSimple()) {
+ DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
+ return false;
+ }
+
+ switch (ArgVT.getSimpleVT().SimpleTy) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ if (!F->getAttributes().hasAttribute(Idx, Attribute::SExt) &&
+ !F->getAttributes().hasAttribute(Idx, Attribute::ZExt)) {
+ // It must be any extend, this shouldn't happen for clang-generated IR
+ // so just fall back on SelectionDAG.
+ DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
+ return false;
+ }
+
+ if (NextGPR32 == GPR32ArgRegs.end()) {
+ DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
+ return false;
+ }
+
+ DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
+ Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
+
+ // Allocating any GPR32 prohibits further use of floating point arguments.
+ NextFGR32 = FGR32ArgRegs.end();
+ NextAFGR64 = AFGR64ArgRegs.end();
+ break;
+
+ case MVT::i32:
+ if (F->getAttributes().hasAttribute(Idx, Attribute::ZExt)) {
+ // The O32 ABI does not permit a zero-extended i32.
+ DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
+ return false;
+ }
+ if (NextGPR32 == GPR32ArgRegs.end()) {
+ DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
+ return false;
+ }
+
+ DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
+ Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);
+
+ // Allocating any GPR32 prohibits further use of floating point arguments.
+ NextFGR32 = FGR32ArgRegs.end();
+ NextAFGR64 = AFGR64ArgRegs.end();
+ break;
+
+ case MVT::f32:
+ if (UnsupportedFPMode) {
+ DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
+ return false;
+ }
+ if (NextFGR32 == FGR32ArgRegs.end()) {
+ DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
+ return false;
+ }
+ DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
+ Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
+ // Allocating an FGR32 also allocates the super-register AFGR64, and
+ // ABI rules require us to skip the corresponding GPR32.
+ if (NextGPR32 != GPR32ArgRegs.end())
+ NextGPR32++;
+ if (NextAFGR64 != AFGR64ArgRegs.end())
+ NextAFGR64++;
+ break;
+
+ case MVT::f64:
+ if (UnsupportedFPMode) {
+ DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
+ return false;
+ }
+ if (NextAFGR64 == AFGR64ArgRegs.end()) {
+ DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
+ return false;
+ }
+ DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
+ Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
+ // Allocating an AFGR64 also allocates its aliased FGR32 sub-registers,
+ // and ABI rules require us to skip the corresponding GPR32 pair.
+ if (NextGPR32 != GPR32ArgRegs.end())
+ NextGPR32++;
+ if (NextGPR32 != GPR32ArgRegs.end())
+ NextGPR32++;
+ if (NextFGR32 != FGR32ArgRegs.end())
+ NextFGR32++;
+ break;
+
+ default:
+ DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
+ return false;
+ }
+
+ ++Idx;
+ }
+
+ Idx = 0;
+ for (const auto &FormalArg : F->args()) {
+ unsigned SrcReg = Allocation[Idx].Reg;
+ unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[Idx].RC);
+ // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
+ // Without this, EmitLiveInCopies may eliminate the livein if its only
+ // use is a bitcast (which isn't turned into an instruction).
+ unsigned ResultReg = createResultReg(Allocation[Idx].RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(DstReg, getKillRegState(true));
+ updateValueMap(&FormalArg, ResultReg);
+ ++Idx;
+ }
+
+ // Calculate the size of the incoming arguments area.
+ // We currently reject all the cases where this would be non-zero.
+ unsigned IncomingArgSizeInBytes = 0;
+
+ // Account for the reserved argument area on ABI's that have one (O32).
+ // It seems strange to do this on the caller side but it's necessary in
+ // SelectionDAG's implementation.
+ IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
+ IncomingArgSizeInBytes);
+
+ MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
+ false);
+
+ return true;
+}
+
+bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
CallingConv::ID CC = CLI.CallConv;
bool IsTailCall = CLI.IsTailCall;
bool IsVarArg = CLI.IsVarArg;
@@ -1337,9 +1517,6 @@ bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
}
bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
- if (!TargetSupported)
- return false;
-
switch (II->getIntrinsicID()) {
default:
return false;
@@ -1435,6 +1612,8 @@ bool MipsFastISel::selectRet(const Instruction *I) {
const Function &F = *I->getParent()->getParent();
const ReturnInst *Ret = cast<ReturnInst>(I);
+ DEBUG(dbgs() << "selectRet\n");
+
if (!FuncInfo.CanLowerReturn)
return false;
@@ -1495,6 +1674,12 @@ bool MipsFastISel::selectRet(const Instruction *I) {
if (RVVT == MVT::f128)
return false;
+ // Do not handle FGR64 returns for now.
+ if (RVVT == MVT::f64 && UnsupportedFPMode) {
+ DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
+ return false;
+ }
+
MVT DestVT = VA.getValVT();
// Special handling for extended integers.
if (RVVT != DestVT) {
@@ -1778,8 +1963,6 @@ bool MipsFastISel::selectShift(const Instruction *I) {
}
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
- if (!TargetSupported)
- return false;
switch (I->getOpcode()) {
default:
break;
OpenPOWER on IntegriCloud