author     dim <dim@FreeBSD.org>    2011-02-27 01:32:10 +0000
committer  dim <dim@FreeBSD.org>    2011-02-27 01:32:10 +0000
commit     b951d621be1d00a520871c689c1cd687b6aa3ae6 (patch)
tree       5c342f2374324ffec4626f558d9aa49f323f90b4 /contrib/llvm/lib/Target/Sparc
parent     4004d6a3076e94bd23e681411c43682267a202fe (diff)
parent     a0fb00f9837bd0d2e5948f16f6a6b82a7a628f51 (diff)
Update llvm/clang to trunk r126547.
There are several bugfixes in this update, but the most important one is to ensure that the __start_ and __stop_ symbols for linker sets and kernel module metadata are always emitted in object files:

http://llvm.org/bugs/show_bug.cgi?id=9292

Before this fix, kernel modules compiled with clang would not be properly processed by kldxref, and if they had any dependencies, the kernel would fail to load those. Another problem occurred when attempting to mount a tmpfs filesystem, which would fail with 'operation not supported by device'.
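For context, linker sets collect pointers into a named ELF section and then iterate over them between the linker-provided __start_<section> and __stop_<section> symbols, which is why the PR9292 fix matters for kldxref and kernel module metadata. Below is a minimal sketch of the mechanism; the section name, macro, and types are illustrative and are not the actual FreeBSD SET_DECLARE/SET_ENTRY machinery.

/*
 * Minimal linker-set sketch (illustrative only; not the real FreeBSD macros).
 * Entries are placed in a dedicated ELF section; because the section name is
 * a valid C identifier, the linker defines __start_my_set and __stop_my_set
 * at its boundaries, and consumers walk the range between them.
 */
#include <stdio.h>

struct item { const char *name; };

#define MY_SET_ENTRY(sym)                                       \
        static const struct item *const __ptr_##sym             \
            __attribute__((section("my_set"), used)) = &(sym)

static const struct item a = { "first" };
static const struct item b = { "second" };
MY_SET_ENTRY(a);
MY_SET_ENTRY(b);

/* Provided by the linker for the section named "my_set". */
extern const struct item *const __start_my_set[];
extern const struct item *const __stop_my_set[];

int main(void)
{
        const struct item *const *p;

        for (p = __start_my_set; p < __stop_my_set; p++)
                printf("%s\n", (*p)->name);
        return (0);
}

If the compiler fails to emit or retain the symbols that bound such sections (the problem referenced above), tools like kldxref, which walk exactly this kind of section in kernel modules, see an empty or unusable set.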
Diffstat (limited to 'contrib/llvm/lib/Target/Sparc')
-rw-r--r--  contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp    | 31
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp  | 43
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.h    |  2
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td      | 18
4 files changed, 85 insertions, 9 deletions
diff --git a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
index ee29275..4b12852 100644
--- a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -79,6 +79,7 @@ namespace {
MachineBasicBlock::iterator
findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot);
+ bool needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize);
};
char Filler::ID = 0;
@@ -91,6 +92,7 @@ FunctionPass *llvm::createSparcDelaySlotFillerPass(TargetMachine &tm) {
return new Filler(tm);
}
+
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
///
@@ -112,6 +114,13 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
BuildMI(MBB, ++J, I->getDebugLoc(), TII->get(SP::NOP));
else
MBB.splice(++J, &MBB, D);
+ unsigned structSize = 0;
+ if (needsUnimp(I, structSize)) {
+ MachineBasicBlock::iterator J = I;
+ ++J; //skip the delay filler.
+ BuildMI(MBB, ++J, I->getDebugLoc(),
+ TII->get(SP::UNIMP)).addImm(structSize);
+ }
}
return Changed;
}
@@ -287,6 +296,28 @@ bool Filler::isDelayFiller(MachineBasicBlock &MBB,
{
if (candidate == MBB.begin())
return false;
+ if (candidate->getOpcode() == SP::UNIMP)
+ return true;
const TargetInstrDesc &prevdesc = (--candidate)->getDesc();
return prevdesc.hasDelaySlot();
}
+
+bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
+{
+ if (!I->getDesc().isCall())
+ return false;
+
+ unsigned structSizeOpNum = 0;
+ switch (I->getOpcode()) {
+ default: llvm_unreachable("Unknown call opcode.");
+ case SP::CALL: structSizeOpNum = 1; break;
+ case SP::JMPLrr:
+ case SP::JMPLri: structSizeOpNum = 2; break;
+ }
+
+ const MachineOperand &MO = I->getOperand(structSizeOpNum);
+ if (!MO.isImm())
+ return false;
+ StructSize = MO.getImm();
+ return true;
+}
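The DelaySlotFiller change above implements the SPARC V8 convention for struct-returning calls: the caller follows the call's delay slot with an "unimp <size>" word encoding the size of the returned struct, and the callee returns past that word (return address plus 12 instead of the usual plus 8, matching the RetAddrOffset change in SparcISelLowering.cpp below). A rough C-level sketch of what this means for generated code follows; the assembly in the comments is illustrative, and the exact registers and offsets are assumptions rather than a quote of the compiler's output.

/*
 * Sketch of a struct return on SPARC V8 (illustrative; see the V8 ABI text
 * for the authoritative rules).  For the call to f() below, the caller is
 * expected to emit roughly:
 *
 *     call   f          ! struct comes back through a hidden pointer
 *     nop               ! delay slot (or a useful filler instruction)
 *     unimp  8          ! sizeof(struct s), may be checked by the callee
 *
 * and f() itself returns with something like "jmp %i7+12", skipping the
 * unimp word, instead of the usual "jmp %i7+8".
 */
struct s {
        int x;
        int y;
};

struct s f(void);          /* defined elsewhere */

int
g(void)
{
        struct s v = f();  /* this call site gets the trailing unimp word */

        return (v.x + v.y);
}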
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 196b87d..70574c3 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -16,7 +16,9 @@
#include "SparcISelLowering.h"
#include "SparcTargetMachine.h"
#include "SparcMachineFunctionInfo.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -116,6 +118,8 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
// Guarantee that all emitted copies are stuck together with flags.
Flag = Chain.getValue(1);
}
+
+ unsigned RetAddrOffset = 8; //Call Inst + Delay Slot
// If the function returns a struct, copy the SRetReturnReg to I0
if (MF.getFunction()->hasStructRetAttr()) {
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
@@ -127,11 +131,16 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
Flag = Chain.getValue(1);
if (MF.getRegInfo().liveout_empty())
MF.getRegInfo().addLiveOut(SP::I0);
+ RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
}
+ SDValue RetAddrOffsetNode = DAG.getConstant(RetAddrOffset, MVT::i32);
+
if (Flag.getNode())
- return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
- return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain);
+ return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain,
+ RetAddrOffsetNode, Flag);
+ return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain,
+ RetAddrOffsetNode);
}
/// LowerFormalArguments - V8 uses a very simple ABI, where all values are
@@ -194,7 +203,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
false, false, 0);
} else {
unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
- &SP::IntRegsRegClass, dl);
+ &SP::IntRegsRegClass);
LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
}
SDValue WholeValue =
@@ -393,6 +402,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<SDValue, 8> MemOpChains;
const unsigned StackOffset = 92;
+ bool hasStructRetAttr = false;
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
i != e;
@@ -433,6 +443,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(),
false, false, 0));
+ hasStructRetAttr = true;
continue;
}
@@ -546,6 +557,8 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
InFlag = Chain.getValue(1);
}
+ unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
+
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
@@ -559,6 +572,8 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
+ if (hasStructRetAttr)
+ Ops.push_back(DAG.getTargetConstant(SRetArgSize, MVT::i32));
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
unsigned Reg = RegsToPass[i].first;
if (Reg >= SP::I0 && Reg <= SP::I7)
@@ -600,7 +615,29 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
return Chain;
}
+unsigned
+SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
+{
+ const Function *CalleeFn = 0;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ CalleeFn = dyn_cast<Function>(G->getGlobal());
+ } else if (ExternalSymbolSDNode *E =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const Function *Fn = DAG.getMachineFunction().getFunction();
+ const Module *M = Fn->getParent();
+ CalleeFn = M->getFunction(E->getSymbol());
+ }
+
+ if (!CalleeFn)
+ return 0;
+ assert(CalleeFn->hasStructRetAttr() &&
+ "Callee does not have the StructRet attribute.");
+
+ const PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
+ const Type *ElementTy = Ty->getElementType();
+ return getTargetData()->getTypeAllocSize(ElementTy);
+}
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
index 849e401..7d02df8 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -101,6 +101,8 @@ namespace llvm {
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+
+ unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 1072323..cf5c48f 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -124,7 +124,8 @@ def call : SDNode<"SPISD::CALL", SDT_SPCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
-def retflag : SDNode<"SPISD::RET_FLAG", SDTNone,
+def SDT_SPRet : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def retflag : SDNode<"SPISD::RET_FLAG", SDT_SPRet,
[SDNPHasChain, SDNPOptInGlue]>;
def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
@@ -132,7 +133,7 @@ def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
def getPCX : Operand<i32> {
let PrintMethod = "printGetPCX";
-}
+}
//===----------------------------------------------------------------------===//
// SPARC Flag Conditions
@@ -232,6 +233,9 @@ let hasSideEffects = 1, mayStore = 1 in {
[(flushw)]>;
}
+def UNIMP : F2_1<0b000, (outs), (ins i32imm:$val),
+ "unimp $val", []>;
+
// FpMOVD/FpNEGD/FpABSD - These are lowered to single-precision ops by the
// fpmover pass.
let Predicates = [HasNoV9] in { // Only emit these in V8 mode.
@@ -292,11 +296,13 @@ let usesCustomInserter = 1, Uses = [FCC] in {
// Section A.3 - Synthetic Instructions, p. 85
// special cases of JMPL:
let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1 in {
- let rd = O7.Num, rs1 = G0.Num, simm13 = 8 in
- def RETL: F3_2<2, 0b111000, (outs), (ins), "retl", [(retflag)]>;
+ let rd = O7.Num, rs1 = G0.Num in
+ def RETL: F3_2<2, 0b111000, (outs), (ins i32imm:$val),
+ "jmp %o7+$val", [(retflag simm13:$val)]>;
- let rd = I7.Num, rs1 = G0.Num, simm13 = 8 in
- def RET: F3_2<2, 0b111000, (outs), (ins), "ret", []>;
+ let rd = I7.Num, rs1 = G0.Num in
+ def RET: F3_2<2, 0b111000, (outs), (ins i32imm:$val),
+ "jmp %i7+$val", []>;
}
// Section B.1 - Load Integer Instructions, p. 90