Diffstat (limited to 'lib/Target/R600')
-rw-r--r--  lib/Target/R600/AMDGPU.h | 66
-rw-r--r--  lib/Target/R600/AMDGPU.td | 85
-rw-r--r--  lib/Target/R600/AMDGPUAsmPrinter.cpp | 86
-rw-r--r--  lib/Target/R600/AMDGPUAsmPrinter.h | 12
-rw-r--r--  lib/Target/R600/AMDGPUCallingConv.td | 46
-rw-r--r--  lib/Target/R600/AMDGPUFrameLowering.cpp | 23
-rw-r--r--  lib/Target/R600/AMDGPUISelDAGToDAG.cpp | 585
-rw-r--r--  lib/Target/R600/AMDGPUISelLowering.cpp | 445
-rw-r--r--  lib/Target/R600/AMDGPUISelLowering.h | 56
-rw-r--r--  lib/Target/R600/AMDGPUIndirectAddressing.cpp | 343
-rw-r--r--  lib/Target/R600/AMDGPUInstrInfo.cpp | 140
-rw-r--r--  lib/Target/R600/AMDGPUInstrInfo.h | 50
-rw-r--r--  lib/Target/R600/AMDGPUInstrInfo.td | 22
-rw-r--r--  lib/Target/R600/AMDGPUInstructions.td | 268
-rw-r--r--  lib/Target/R600/AMDGPUIntrinsics.td | 2
-rw-r--r--  lib/Target/R600/AMDGPUMCInstLower.cpp | 43
-rw-r--r--  lib/Target/R600/AMDGPUMachineFunction.cpp | 9
-rw-r--r--  lib/Target/R600/AMDGPUMachineFunction.h | 9
-rw-r--r--  lib/Target/R600/AMDGPURegisterInfo.cpp | 38
-rw-r--r--  lib/Target/R600/AMDGPURegisterInfo.h | 11
-rw-r--r--  lib/Target/R600/AMDGPURegisterInfo.td | 3
-rw-r--r--  lib/Target/R600/AMDGPUStructurizeCFG.cpp | 896
-rw-r--r--  lib/Target/R600/AMDGPUSubtarget.cpp | 78
-rw-r--r--  lib/Target/R600/AMDGPUSubtarget.h | 33
-rw-r--r--  lib/Target/R600/AMDGPUTargetMachine.cpp | 81
-rw-r--r--  lib/Target/R600/AMDGPUTargetMachine.h | 61
-rw-r--r--  lib/Target/R600/AMDGPUTargetTransformInfo.cpp | 90
-rw-r--r--  lib/Target/R600/AMDIL.h | 121
-rw-r--r--  lib/Target/R600/AMDIL7XXDevice.cpp | 115
-rw-r--r--  lib/Target/R600/AMDIL7XXDevice.h | 72
-rw-r--r--  lib/Target/R600/AMDILBase.td | 64
-rw-r--r--  lib/Target/R600/AMDILCFGStructurizer.cpp | 4133
-rw-r--r--  lib/Target/R600/AMDILDevice.cpp | 132
-rw-r--r--  lib/Target/R600/AMDILDevice.h | 117
-rw-r--r--  lib/Target/R600/AMDILDeviceInfo.cpp | 97
-rw-r--r--  lib/Target/R600/AMDILDeviceInfo.h | 88
-rw-r--r--  lib/Target/R600/AMDILDevices.h | 19
-rw-r--r--  lib/Target/R600/AMDILEvergreenDevice.cpp | 169
-rw-r--r--  lib/Target/R600/AMDILEvergreenDevice.h | 93
-rw-r--r--  lib/Target/R600/AMDILISelDAGToDAG.cpp | 666
-rw-r--r--  lib/Target/R600/AMDILISelLowering.cpp | 61
-rw-r--r--  lib/Target/R600/AMDILInstrInfo.td | 67
-rw-r--r--  lib/Target/R600/AMDILIntrinsicInfo.cpp | 4
-rw-r--r--  lib/Target/R600/AMDILNIDevice.cpp | 65
-rw-r--r--  lib/Target/R600/AMDILNIDevice.h | 57
-rw-r--r--  lib/Target/R600/AMDILSIDevice.cpp | 48
-rw-r--r--  lib/Target/R600/AMDILSIDevice.h | 39
-rw-r--r--  lib/Target/R600/CMakeLists.txt | 18
-rw-r--r--  lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp | 137
-rw-r--r--  lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h | 4
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp | 8
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp | 13
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h | 4
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp | 21
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h | 1
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp | 2
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h | 4
-rw-r--r--  lib/Target/R600/MCTargetDesc/CMakeLists.txt | 1
-rw-r--r--  lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp | 102
-rw-r--r--  lib/Target/R600/Processors.td | 48
-rw-r--r--  lib/Target/R600/R600ClauseMergePass.cpp | 204
-rw-r--r--  lib/Target/R600/R600ControlFlowFinalizer.cpp | 152
-rw-r--r--  lib/Target/R600/R600Defines.h | 122
-rw-r--r--  lib/Target/R600/R600EmitClauseMarkers.cpp | 164
-rw-r--r--  lib/Target/R600/R600ExpandSpecialInstrs.cpp | 90
-rw-r--r--  lib/Target/R600/R600ISelLowering.cpp | 1299
-rw-r--r--  lib/Target/R600/R600ISelLowering.h | 14
-rw-r--r--  lib/Target/R600/R600InstrFormats.td | 492
-rw-r--r--  lib/Target/R600/R600InstrInfo.cpp | 825
-rw-r--r--  lib/Target/R600/R600InstrInfo.h | 121
-rw-r--r--  lib/Target/R600/R600Instructions.td | 1715
-rw-r--r--  lib/Target/R600/R600Intrinsics.td | 44
-rw-r--r--  lib/Target/R600/R600MachineFunctionInfo.cpp | 6
-rw-r--r--  lib/Target/R600/R600MachineFunctionInfo.h | 3
-rw-r--r--  lib/Target/R600/R600MachineScheduler.cpp | 300
-rw-r--r--  lib/Target/R600/R600MachineScheduler.h | 44
-rw-r--r--  lib/Target/R600/R600OptimizeVectorRegisters.cpp | 380
-rw-r--r--  lib/Target/R600/R600Packetizer.cpp | 311
-rw-r--r--  lib/Target/R600/R600RegisterInfo.cpp | 53
-rw-r--r--  lib/Target/R600/R600RegisterInfo.h | 13
-rw-r--r--  lib/Target/R600/R600RegisterInfo.td | 79
-rw-r--r--  lib/Target/R600/R600Schedule.td | 6
-rw-r--r--  lib/Target/R600/R600TextureIntrinsicsReplacer.cpp | 303
-rw-r--r--  lib/Target/R600/SIAnnotateControlFlow.cpp | 16
-rw-r--r--  lib/Target/R600/SIDefines.h | 16
-rw-r--r--  lib/Target/R600/SIFixSGPRCopies.cpp | 263
-rw-r--r--  lib/Target/R600/SIISelLowering.cpp | 756
-rw-r--r--  lib/Target/R600/SIISelLowering.h | 31
-rw-r--r--  lib/Target/R600/SIInsertWaits.cpp | 34
-rw-r--r--  lib/Target/R600/SIInstrFormats.td | 116
-rw-r--r--  lib/Target/R600/SIInstrInfo.cpp | 524
-rw-r--r--  lib/Target/R600/SIInstrInfo.h | 80
-rw-r--r--  lib/Target/R600/SIInstrInfo.td | 286
-rw-r--r--  lib/Target/R600/SIInstructions.td | 798
-rw-r--r--  lib/Target/R600/SIIntrinsics.td | 26
-rw-r--r--  lib/Target/R600/SILowerControlFlow.cpp | 38
-rw-r--r--  lib/Target/R600/SIMachineFunctionInfo.cpp | 4
-rw-r--r--  lib/Target/R600/SIMachineFunctionInfo.h | 1
-rw-r--r--  lib/Target/R600/SIRegisterInfo.cpp | 88
-rw-r--r--  lib/Target/R600/SIRegisterInfo.h | 26
-rw-r--r--  lib/Target/R600/SIRegisterInfo.td | 18
-rw-r--r--  lib/Target/R600/SITypeRewriter.cpp | 162
-rw-r--r--  lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp | 2
103 files changed, 11453 insertions(+), 8743 deletions(-)
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index 9792bd8..025b28e 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -11,32 +11,47 @@
#ifndef AMDGPU_H
#define AMDGPU_H
-#include "AMDGPUTargetMachine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
-class FunctionPass;
+class AMDGPUInstrPrinter;
class AMDGPUTargetMachine;
+class FunctionPass;
+class MCAsmInfo;
+class raw_ostream;
+class Target;
+class TargetMachine;
// R600 Passes
-FunctionPass* createR600KernelParametersPass(const DataLayout *TD);
+FunctionPass *createR600VectorRegMerger(TargetMachine &tm);
+FunctionPass *createR600TextureIntrinsicsReplacer();
FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm);
FunctionPass *createR600EmitClauseMarkers(TargetMachine &tm);
+FunctionPass *createR600ClauseMergePass(TargetMachine &tm);
FunctionPass *createR600Packetizer(TargetMachine &tm);
FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm);
+FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm);
// SI Passes
+FunctionPass *createSITypeRewriter();
FunctionPass *createSIAnnotateControlFlowPass();
FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
+FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
FunctionPass *createSIInsertWaits(TargetMachine &tm);
// Passes common to R600 and SI
Pass *createAMDGPUStructurizeCFGPass();
FunctionPass *createAMDGPUConvertToISAPass(TargetMachine &tm);
-FunctionPass* createAMDGPUIndirectAddressingPass(TargetMachine &tm);
+FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
+
+/// \brief Creates an AMDGPU-specific Target Transformation Info pass.
+ImmutablePass *
+createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM);
+
+extern Target TheAMDGPUTarget;
} // End namespace llvm
@@ -49,4 +64,47 @@ namespace ShaderType {
};
}
+/// OpenCL uses address spaces to differentiate between
+/// various memory regions on the hardware. On the CPU
+/// all of the address spaces point to the same memory,
+/// however on the GPU, each address space points to
+/// a separate piece of memory that is unique from other
+/// memory locations.
+namespace AMDGPUAS {
+enum AddressSpaces {
+ PRIVATE_ADDRESS = 0, ///< Address space for private memory.
+ GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0).
+ CONSTANT_ADDRESS = 2, ///< Address space for constant memory.
+ LOCAL_ADDRESS = 3, ///< Address space for local memory.
+ REGION_ADDRESS = 4, ///< Address space for region memory.
+ ADDRESS_NONE = 5, ///< Address space for unknown memory.
+ PARAM_D_ADDRESS = 6, ///< Address space for direct addressable parameter memory (CONST0).
+ PARAM_I_ADDRESS = 7, ///< Address space for indirect addressable parameter memory (VTX1).
+
+ // Do not re-order the CONSTANT_BUFFER_* enums. Several places depend on this
+ // order to be able to dynamically index a constant buffer, for example:
+ //
+ // ConstantBufferAS = CONSTANT_BUFFER_0 + CBIdx
+
+ CONSTANT_BUFFER_0 = 8,
+ CONSTANT_BUFFER_1 = 9,
+ CONSTANT_BUFFER_2 = 10,
+ CONSTANT_BUFFER_3 = 11,
+ CONSTANT_BUFFER_4 = 12,
+ CONSTANT_BUFFER_5 = 13,
+ CONSTANT_BUFFER_6 = 14,
+ CONSTANT_BUFFER_7 = 15,
+ CONSTANT_BUFFER_8 = 16,
+ CONSTANT_BUFFER_9 = 17,
+ CONSTANT_BUFFER_10 = 18,
+ CONSTANT_BUFFER_11 = 19,
+ CONSTANT_BUFFER_12 = 20,
+ CONSTANT_BUFFER_13 = 21,
+ CONSTANT_BUFFER_14 = 22,
+ CONSTANT_BUFFER_15 = 23,
+ LAST_ADDRESS = 24
+};
+
+} // namespace AMDGPUAS
+
#endif // AMDGPU_H
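
The comment inside the enum above only works because the CONSTANT_BUFFER_* enumerators are contiguous and ordered. A minimal C++ sketch of that dynamic indexing; the helper name and the bounds check are illustrative, not part of the patch:

    #include <cassert>

    // Abridged mirror of the AMDGPUAS enumerators defined above.
    enum AddressSpaces {
      CONSTANT_BUFFER_0 = 8,
      CONSTANT_BUFFER_15 = 23,
      LAST_ADDRESS = 24
    };

    // Dynamically index a constant buffer; this relies on the
    // CONSTANT_BUFFER_* values being contiguous and in order.
    unsigned constantBufferAS(unsigned CBIdx) {
      assert(CBIdx < 16 && "only 16 constant buffers are defined");
      return CONSTANT_BUFFER_0 + CBIdx;
    }

    int main() {
      assert(constantBufferAS(0) == CONSTANT_BUFFER_0);   // 8
      assert(constantBufferAS(15) == CONSTANT_BUFFER_15); // 23
      return 0;
    }
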
diff --git a/lib/Target/R600/AMDGPU.td b/lib/Target/R600/AMDGPU.td
index 1a26c77..182235b 100644
--- a/lib/Target/R600/AMDGPU.td
+++ b/lib/Target/R600/AMDGPU.td
@@ -10,6 +10,91 @@
// Include AMDIL TD files
include "AMDILBase.td"
+//===----------------------------------------------------------------------===//
+// Subtarget Features
+//===----------------------------------------------------------------------===//
+
+// Debugging Features
+
+def FeatureDumpCode : SubtargetFeature <"DumpCode",
+ "DumpCode",
+ "true",
+ "Dump MachineInstrs in the CodeEmitter">;
+
+def FeatureIRStructurizer : SubtargetFeature <"disable-irstructurizer",
+ "EnableIRStructurizer",
+ "false",
+ "Disable IR Structurizer">;
+
+// Target features
+
+def FeatureIfCvt : SubtargetFeature <"disable-ifcvt",
+ "EnableIfCvt",
+ "false",
+ "Disable the if conversion pass">;
+
+def FeatureFP64 : SubtargetFeature<"fp64",
+ "FP64",
+ "true",
+ "Enable 64bit double precision operations">;
+
+def Feature64BitPtr : SubtargetFeature<"64BitPtr",
+ "Is64bit",
+ "true",
+ "Specify if 64bit addressing should be used.">;
+
+def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
+ "Is32on64bit",
+ "false",
+ "Specify if 64bit sized pointers with 32bit addressing should be used.">;
+
+def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
+ "R600ALUInst",
+ "false",
+ "Older version of ALU instructions encoding.">;
+
+def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
+ "HasVertexCache",
+ "true",
+ "Specify use of dedicated vertex cache.">;
+
+def FeatureCaymanISA : SubtargetFeature<"caymanISA",
+ "CaymanISA",
+ "true",
+ "Use Cayman ISA">;
+
+class SubtargetFeatureFetchLimit <string Value> :
+ SubtargetFeature <"fetch"#Value,
+ "TexVTXClauseSize",
+ Value,
+ "Limit the maximum number of fetches in a clause to "#Value>;
+
+def FeatureFetchLimit8 : SubtargetFeatureFetchLimit <"8">;
+def FeatureFetchLimit16 : SubtargetFeatureFetchLimit <"16">;
+
+class SubtargetFeatureGeneration <string Value,
+ list<SubtargetFeature> Implies> :
+ SubtargetFeature <Value, "Gen", "AMDGPUSubtarget::"#Value,
+ Value#" GPU generation", Implies>;
+
+def FeatureR600 : SubtargetFeatureGeneration<"R600",
+ [FeatureR600ALUInst, FeatureFetchLimit8]>;
+
+def FeatureR700 : SubtargetFeatureGeneration<"R700",
+ [FeatureFetchLimit16]>;
+
+def FeatureEvergreen : SubtargetFeatureGeneration<"EVERGREEN",
+ [FeatureFetchLimit16]>;
+
+def FeatureNorthernIslands : SubtargetFeatureGeneration<"NORTHERN_ISLANDS",
+ [FeatureFetchLimit16]>;
+
+def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS",
+ [Feature64BitPtr, FeatureFP64]>;
+
+def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
+ [Feature64BitPtr, FeatureFP64]>;
+//===----------------------------------------------------------------------===//
def AMDGPUInstrInfo : InstrInfo {
let guessInstructionProperties = 1;
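
The SubtargetFeatureGeneration class above stores AMDGPUSubtarget::<Value> into the subtarget's Gen field, so the generations form an ordered sequence that later code compares with < and >= (see the asm printer changes below). A hedged sketch of that usage pattern; the enumerator values here are assumptions, only their relative order matters:

    #include <cassert>

    // Assumed ordering of the generations declared in the .td file above.
    enum Generation {
      R600, R700, EVERGREEN, NORTHERN_ISLANDS, SOUTHERN_ISLANDS, SEA_ISLANDS
    };

    // Passes gate behavior on ordered comparisons, e.g. picking the SI
    // program-info emitter for SOUTHERN_ISLANDS and newer chips.
    bool usesSIProgramInfo(Generation Gen) {
      return Gen > NORTHERN_ISLANDS;
    }

    int main() {
      assert(!usesSIProgramInfo(EVERGREEN));
      assert(usesSIProgramInfo(SOUTHERN_ISLANDS));
      return 0;
    }
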
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/R600/AMDGPUAsmPrinter.cpp
index 4c35ecf..67bdba2 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/R600/AMDGPUAsmPrinter.cpp
@@ -19,16 +19,17 @@
#include "AMDGPUAsmPrinter.h"
#include "AMDGPU.h"
-#include "SIDefines.h"
-#include "SIMachineFunctionInfo.h"
-#include "SIRegisterInfo.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
+#include "SIDefines.h"
+#include "SIMachineFunctionInfo.h"
+#include "SIRegisterInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -44,32 +45,60 @@ extern "C" void LLVMInitializeR600AsmPrinter() {
TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
}
+AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer) {
+ DisasmEnabled = TM.getSubtarget<AMDGPUSubtarget>().dumpCode() &&
+ !Streamer.hasRawTextSupport();
+}
+
/// We need to override this function so we can avoid
/// the call to EmitFunctionHeader(), which the MCPureStreamer can't handle.
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
- if (STM.dumpCode()) {
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- MF.dump();
-#endif
- }
SetupMachineFunction(MF);
if (OutStreamer.hasRawTextSupport()) {
OutStreamer.EmitRawText("@" + MF.getName() + ":");
}
- const MCSectionELF *ConfigSection = getObjFileLowering().getContext()
- .getELFSection(".AMDGPU.config",
+ MCContext &Context = getObjFileLowering().getContext();
+ const MCSectionELF *ConfigSection = Context.getELFSection(".AMDGPU.config",
ELF::SHT_PROGBITS, 0,
SectionKind::getReadOnly());
OutStreamer.SwitchSection(ConfigSection);
- if (STM.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
+ const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
+ if (STM.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
EmitProgramInfoSI(MF);
} else {
EmitProgramInfoR600(MF);
}
+
+ DisasmLines.clear();
+ HexLines.clear();
+ DisasmLineMaxLen = 0;
+
OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
EmitFunctionBody();
+
+ if (STM.dumpCode()) {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ MF.dump();
+#endif
+
+ if (DisasmEnabled) {
+ OutStreamer.SwitchSection(Context.getELFSection(".AMDGPU.disasm",
+ ELF::SHT_NOTE, 0,
+ SectionKind::getReadOnly()));
+
+ for (size_t i = 0; i < DisasmLines.size(); ++i) {
+ std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
+ Comment += " ; " + HexLines[i] + "\n";
+
+ OutStreamer.EmitBytes(StringRef(DisasmLines[i]));
+ OutStreamer.EmitBytes(StringRef(Comment));
+ }
+ }
+ }
+
return false;
}
@@ -105,7 +134,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
}
unsigned RsrcReg;
- if (STM.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX) {
+ if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
// Evergreen / Northern Islands
switch (MFI->ShaderType) {
default: // Fall through
@@ -130,9 +159,15 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(MachineFunction &MF) {
S_STACK_SIZE(MFI->StackSize), 4);
OutStreamer.EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
OutStreamer.EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
+
+ if (MFI->ShaderType == ShaderType::COMPUTE) {
+ OutStreamer.EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
+ OutStreamer.EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+ }
}
void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
+ const AMDGPUSubtarget &STM = TM.getSubtarget<AMDGPUSubtarget>();
unsigned MaxSGPR = 0;
unsigned MaxVGPR = 0;
bool VCCUsed = false;
@@ -148,7 +183,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
unsigned numOperands = MI.getNumOperands();
for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
- MachineOperand & MO = MI.getOperand(op_idx);
+ MachineOperand &MO = MI.getOperand(op_idx);
unsigned maxUsed;
unsigned width = 0;
bool isSGPR = false;
@@ -162,8 +197,10 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
VCCUsed = true;
continue;
}
+
switch (reg) {
default: break;
+ case AMDGPU::SCC:
case AMDGPU::EXEC:
case AMDGPU::M0:
continue;
@@ -196,6 +233,9 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
} else if (AMDGPU::VReg_256RegClass.contains(reg)) {
isSGPR = false;
width = 8;
+ } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
+ isSGPR = true;
+ width = 16;
} else if (AMDGPU::VReg_512RegClass.contains(reg)) {
isSGPR = false;
width = 16;
@@ -227,7 +267,25 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(MachineFunction &MF) {
OutStreamer.EmitIntValue(RsrcReg, 4);
OutStreamer.EmitIntValue(S_00B028_VGPRS(MaxVGPR / 4) | S_00B028_SGPRS(MaxSGPR / 8), 4);
+
+ unsigned LDSAlignShift;
+ if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
+ // LDS is allocated in 64 dword blocks
+ LDSAlignShift = 8;
+ } else {
+ // LDS is allocated in 128 dword blocks
+ LDSAlignShift = 9;
+ }
+ unsigned LDSBlocks =
+ RoundUpToAlignment(MFI->LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+
+ if (MFI->ShaderType == ShaderType::COMPUTE) {
+ OutStreamer.EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
+ OutStreamer.EmitIntValue(S_00B84C_LDS_SIZE(LDSBlocks), 4);
+ }
if (MFI->ShaderType == ShaderType::PIXEL) {
+ OutStreamer.EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
+ OutStreamer.EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(LDSBlocks), 4);
OutStreamer.EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
OutStreamer.EmitIntValue(MFI->PSInputAddr, 4);
}
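
A worked example for the LDS block computation above, as a sketch rather than patch code: 64 dwords are 256 bytes, so pre-SEA_ISLANDS the shift is 8; 128 dwords are 512 bytes, so the shift is 9. Rounding a 1000-byte LDS allocation up then shifting yields the block count:

    #include <cassert>
    #include <cstdint>

    // Same computation as RoundUpToAlignment(Size, Align) for a
    // power-of-two Align, followed by the block-count shift.
    uint64_t ldsBlocks(uint64_t LDSSize, unsigned AlignShift) {
      uint64_t Align = 1ull << AlignShift;
      return ((LDSSize + Align - 1) / Align * Align) >> AlignShift;
    }

    int main() {
      assert(ldsBlocks(1000, 8) == 4); // pre-SEA_ISLANDS: ceil(1000/256)
      assert(ldsBlocks(1000, 9) == 2); // SEA_ISLANDS+:    ceil(1000/512)
      return 0;
    }
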
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.h b/lib/Target/R600/AMDGPUAsmPrinter.h
index f425ef4..05dc9bb 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.h
+++ b/lib/Target/R600/AMDGPUAsmPrinter.h
@@ -1,4 +1,4 @@
-//===-- AMDGPUAsmPrinter.h - Print AMDGPU assembly code -------------------===//
+//===-- AMDGPUAsmPrinter.h - Print AMDGPU assembly code ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,14 +16,15 @@
#define AMDGPU_ASMPRINTER_H
#include "llvm/CodeGen/AsmPrinter.h"
+#include <string>
+#include <vector>
namespace llvm {
class AMDGPUAsmPrinter : public AsmPrinter {
public:
- explicit AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) { }
+ explicit AMDGPUAsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -38,6 +39,11 @@ public:
/// Implemented in AMDGPUMCInstLower.cpp
virtual void EmitInstruction(const MachineInstr *MI);
+
+protected:
+ bool DisasmEnabled;
+ std::vector<std::string> DisasmLines, HexLines;
+ size_t DisasmLineMaxLen;
};
} // End namespace llvm
diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td
index 9c30515..65cdb24 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/R600/AMDGPUCallingConv.td
@@ -19,12 +19,13 @@ def CC_SI : CallingConv<[
CCIfInReg<CCIfType<[f32, i32] , CCAssignToReg<[
SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
- SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15
+ SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
+ SGPR16
]>>>,
CCIfInReg<CCIfType<[i64] , CCAssignToRegWithShadow<
[ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
- [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR12, SGPR15 ]
+ [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
>>>,
CCIfNotInReg<CCIfType<[f32, i32] , CCAssignToReg<[
@@ -34,15 +35,40 @@ def CC_SI : CallingConv<[
VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31
]>>>,
- // This is the default for i64 values.
- // XXX: We should change this once clang understands the CC_AMDGPU.
- CCIfType<[i64], CCAssignToRegWithShadow<
- [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
- [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
- >>
+ CCIfByVal<CCIfType<[i64] , CCAssignToRegWithShadow<
+ [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
+ [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
+ >>>
+
+]>;
+
+// Calling convention for R600
+def CC_R600 : CallingConv<[
+ CCIfInReg<CCIfType<[v4f32, v4i32] , CCAssignToReg<[
+ T0_XYZW, T1_XYZW, T2_XYZW, T3_XYZW, T4_XYZW, T5_XYZW, T6_XYZW, T7_XYZW,
+ T8_XYZW, T9_XYZW, T10_XYZW, T11_XYZW, T12_XYZW, T13_XYZW, T14_XYZW, T15_XYZW,
+ T16_XYZW, T17_XYZW, T18_XYZW, T19_XYZW, T20_XYZW, T21_XYZW, T22_XYZW,
+ T23_XYZW, T24_XYZW, T25_XYZW, T26_XYZW, T27_XYZW, T28_XYZW, T29_XYZW,
+ T30_XYZW, T31_XYZW, T32_XYZW
+ ]>>>
+]>;
+
+// Calling convention for compute kernels
+def CC_AMDGPU_Kernel : CallingConv<[
+ CCCustom<"allocateStack">
]>;
def CC_AMDGPU : CallingConv<[
- CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().device()"#
- "->getGeneration() == AMDGPUDeviceInfo::HD7XXX", CCDelegateTo<CC_SI>>
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() >= "
+ "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"#
+ "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() < "
+ "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "State.getMachineFunction().getInfo<R600MachineFunctionInfo>()->"
+ "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
+ ".getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_SI>>,
+ CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
+ ".getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_R600>>
]>;
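
The shadow-list fix above matters because CC_SI hands an i64 to an even-numbered SGPR and "shadows" (also consumes) the following odd register; the old list paired SGPR12 with itself instead of SGPR13. A sketch of the intended pairing, with illustrative indices rather than LLVM register IDs:

    #include <cassert>

    // Sketch of the even/odd pairing CC_SI encodes: an i64 assigned to
    // SGPR{2k} must shadow SGPR{2k+1}.
    unsigned shadowOf(unsigned EvenSGPR) {
      assert(EvenSGPR % 2 == 0 && "i64 values start on an even SGPR");
      return EvenSGPR + 1;
    }

    int main() {
      assert(shadowOf(12) == 13); // the entry the patch corrects
      return 0;
    }
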
diff --git a/lib/Target/R600/AMDGPUFrameLowering.cpp b/lib/Target/R600/AMDGPUFrameLowering.cpp
index 815d6f7..40f14d2 100644
--- a/lib/Target/R600/AMDGPUFrameLowering.cpp
+++ b/lib/Target/R600/AMDGPUFrameLowering.cpp
@@ -78,27 +78,8 @@ int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
- const AllocaInst *Alloca = MFI->getObjectAllocation(i);
- unsigned ArrayElements;
- const Type *AllocaType = Alloca->getAllocatedType();
- const Type *ElementType;
-
- if (AllocaType->isArrayTy()) {
- ArrayElements = AllocaType->getArrayNumElements();
- ElementType = AllocaType->getArrayElementType();
- } else {
- ArrayElements = 1;
- ElementType = AllocaType;
- }
-
- unsigned VectorElements;
- if (ElementType->isVectorTy()) {
- VectorElements = ElementType->getVectorNumElements();
- } else {
- VectorElements = 1;
- }
-
- Offset += (VectorElements / getStackWidth(MF)) * ArrayElements;
+ unsigned Size = MFI->getObjectSize(i);
+ Offset += (Size / (getStackWidth(MF) * 4));
}
return Offset;
}
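
The replacement loop above derives the frame offset directly from the object size: one stack slot holds getStackWidth(MF) 32-bit channels, i.e. StackWidth * 4 bytes. A worked sketch of that arithmetic, under the same assumption:

    #include <cassert>

    // Slots occupied by a stack object of SizeInBytes when each slot is
    // StackWidth 32-bit channels wide -- the Size / (StackWidth * 4)
    // expression from getFrameIndexOffset above.
    unsigned slotsForObject(unsigned SizeInBytes, unsigned StackWidth) {
      return SizeInBytes / (StackWidth * 4);
    }

    int main() {
      assert(slotsForObject(64, 1) == 16); // scalar stack: 16 dword slots
      assert(slotsForObject(64, 4) == 4);  // vec4 stack: 4 xyzw slots
      return 0;
    }
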
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
new file mode 100644
index 0000000..a989135
--- /dev/null
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -0,0 +1,585 @@
+//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief Defines an instruction selector for the AMDGPU target.
+//
+//===----------------------------------------------------------------------===//
+#include "AMDGPUInstrInfo.h"
+#include "AMDGPUISelLowering.h" // For AMDGPUISD
+#include "AMDGPURegisterInfo.h"
+#include "R600InstrInfo.h"
+#include "SIISelLowering.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include <list>
+#include <queue>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// AMDGPU specific code to select AMDGPU machine instructions for
+/// SelectionDAG operations.
+class AMDGPUDAGToDAGISel : public SelectionDAGISel {
+ // Subtarget - Keep a reference to the AMDGPU Subtarget around so that we
+ // can make the right decision when generating code for different targets.
+ const AMDGPUSubtarget &Subtarget;
+public:
+ AMDGPUDAGToDAGISel(TargetMachine &TM);
+ virtual ~AMDGPUDAGToDAGISel();
+
+ SDNode *Select(SDNode *N);
+ virtual const char *getPassName() const;
+ virtual void PostprocessISelDAG();
+
+private:
+ inline SDValue getSmallIPtrImm(unsigned Imm);
+ bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
+ const R600InstrInfo *TII);
+ bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
+ bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
+
+ // Complex pattern selectors
+ bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
+ bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
+ bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
+ SDValue SimplifyI24(SDValue &Op);
+ bool SelectI24(SDValue Addr, SDValue &Op);
+ bool SelectU24(SDValue Addr, SDValue &Op);
+
+ static bool checkType(const Value *ptr, unsigned int addrspace);
+
+ static bool isGlobalStore(const StoreSDNode *N);
+ static bool isPrivateStore(const StoreSDNode *N);
+ static bool isLocalStore(const StoreSDNode *N);
+ static bool isRegionStore(const StoreSDNode *N);
+
+ bool isCPLoad(const LoadSDNode *N) const;
+ bool isConstantLoad(const LoadSDNode *N, int cbID) const;
+ bool isGlobalLoad(const LoadSDNode *N) const;
+ bool isParamLoad(const LoadSDNode *N) const;
+ bool isPrivateLoad(const LoadSDNode *N) const;
+ bool isLocalLoad(const LoadSDNode *N) const;
+ bool isRegionLoad(const LoadSDNode *N) const;
+
+ const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
+ bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
+ bool SelectGlobalValueVariableOffset(SDValue Addr,
+ SDValue &BaseReg, SDValue& Offset);
+ bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
+
+ // Include the pieces autogenerated from the target description.
+#include "AMDGPUGenDAGISel.inc"
+};
+} // end anonymous namespace
+
+/// \brief This pass converts a legalized DAG into an AMDGPU-specific
+/// DAG, ready for instruction scheduling.
+FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
+ return new AMDGPUDAGToDAGISel(TM);
+}
+
+AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
+ : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
+}
+
+AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
+}
+
+/// \brief Determine the register class for \p OpNo
+/// \returns The register class of the virtual register that will be used for
+/// the given operand number \p OpNo or NULL if the register class cannot be
+/// determined.
+const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
+ unsigned OpNo) const {
+ if (!N->isMachineOpcode()) {
+ return NULL;
+ }
+ switch (N->getMachineOpcode()) {
+ default: {
+ const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
+ unsigned OpIdx = Desc.getNumDefs() + OpNo;
+ if (OpIdx >= Desc.getNumOperands())
+ return NULL;
+ int RegClass = Desc.OpInfo[OpIdx].RegClass;
+ if (RegClass == -1) {
+ return NULL;
+ }
+ return TM.getRegisterInfo()->getRegClass(RegClass);
+ }
+ case AMDGPU::REG_SEQUENCE: {
+ const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
+ cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
+ unsigned SubRegIdx =
+ dyn_cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
+ return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
+ }
+ }
+}
+
+SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i32);
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRParam(
+ SDValue Addr, SDValue& R1, SDValue& R2) {
+
+ if (Addr.getOpcode() == ISD::FrameIndex) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ }
+ } else if (Addr.getOpcode() == ISD::ADD) {
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i32);
+ }
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress) {
+ return false;
+ }
+ return SelectADDRParam(Addr, R1, R2);
+}
+
+
+bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress) {
+ return false;
+ }
+
+ if (Addr.getOpcode() == ISD::FrameIndex) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
+ R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ }
+ } else if (Addr.getOpcode() == ISD::ADD) {
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ } else {
+ R1 = Addr;
+ R2 = CurDAG->getTargetConstant(0, MVT::i64);
+ }
+ return true;
+}
+
+SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
+ unsigned int Opc = N->getOpcode();
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
+ return NULL; // Already selected.
+ }
+ switch (Opc) {
+ default: break;
+ case ISD::BUILD_VECTOR: {
+ unsigned RegClassID;
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ const AMDGPURegisterInfo *TRI =
+ static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
+ const SIRegisterInfo *SIRI =
+ static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
+ EVT VT = N->getValueType(0);
+ unsigned NumVectorElts = VT.getVectorNumElements();
+ assert(VT.getVectorElementType().bitsEq(MVT::i32));
+ if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ bool UseVReg = true;
+ for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
+ U != E; ++U) {
+ if (!U->isMachineOpcode()) {
+ continue;
+ }
+ const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
+ if (!RC) {
+ continue;
+ }
+ if (SIRI->isSGPRClass(RC)) {
+ UseVReg = false;
+ }
+ }
+ switch(NumVectorElts) {
+ case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
+ AMDGPU::SReg_32RegClassID;
+ break;
+ case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
+ AMDGPU::SReg_64RegClassID;
+ break;
+ case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
+ AMDGPU::SReg_128RegClassID;
+ break;
+ case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
+ AMDGPU::SReg_256RegClassID;
+ break;
+ case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
+ AMDGPU::SReg_512RegClassID;
+ break;
+ default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
+ }
+ } else {
+ // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG, which
+ // adds a 128-bit register copy when going through the
+ // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
+ // as possible because they can't be bundled by our scheduler.
+ switch(NumVectorElts) {
+ case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
+ case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
+ default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
+ }
+ }
+
+ SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+
+ if (NumVectorElts == 1) {
+ return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
+ VT.getVectorElementType(),
+ N->getOperand(0), RegClass);
+ }
+
+ assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
+ "supported yet");
+ // 16 = Max Num Vector Elements
+ // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
+ // 1 = Vector Register Class
+ SDValue RegSeqArgs[16 * 2 + 1];
+
+ RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+ bool IsRegSeq = true;
+ for (unsigned i = 0; i < N->getNumOperands(); i++) {
+ // XXX: Why is this here?
+ if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+ IsRegSeq = false;
+ break;
+ }
+ RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
+ RegSeqArgs[1 + (2 * i) + 1] =
+ CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+ }
+ if (!IsRegSeq)
+ break;
+ return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
+ RegSeqArgs, 2 * N->getNumOperands() + 1);
+ }
+ case ISD::BUILD_PAIR: {
+ SDValue RC, SubReg0, SubReg1;
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ break;
+ }
+ if (N->getValueType(0) == MVT::i128) {
+ RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
+ SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
+ SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
+ } else if (N->getValueType(0) == MVT::i64) {
+ RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
+ SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
+ SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+ } else {
+ llvm_unreachable("Unhandled value type for BUILD_PAIR");
+ }
+ const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
+ N->getOperand(1), SubReg1 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
+ SDLoc(N), N->getValueType(0), Ops);
+ }
+ case AMDGPUISD::REGISTER_LOAD: {
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ break;
+ SDValue Addr, Offset;
+
+ SelectADDRIndirect(N->getOperand(1), Addr, Offset);
+ const SDValue Ops[] = {
+ Addr,
+ Offset,
+ CurDAG->getTargetConstant(0, MVT::i32),
+ N->getOperand(0),
+ };
+ return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
+ CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
+ Ops);
+ }
+ case AMDGPUISD::REGISTER_STORE: {
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ break;
+ SDValue Addr, Offset;
+ SelectADDRIndirect(N->getOperand(2), Addr, Offset);
+ const SDValue Ops[] = {
+ N->getOperand(1),
+ Addr,
+ Offset,
+ CurDAG->getTargetConstant(0, MVT::i32),
+ N->getOperand(0),
+ };
+ return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
+ CurDAG->getVTList(MVT::Other),
+ Ops);
+ }
+ }
+ return SelectCode(N);
+}
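
In the BUILD_VECTOR case above, the REG_SEQUENCE argument array is laid out as [RegClassID, value0, subreg0, value1, subreg1, ...], which is where the 16 * 2 + 1 size and the 1 + (2 * i) indexing come from. A tiny sketch of that layout, with assumed helper names:

    #include <cassert>

    // Slot indices in the RegSeqArgs array built by the Select code above:
    // slot 0 holds the register class ID, then one (value, subregister)
    // pair per vector element.
    unsigned valueSlot(unsigned Elt)  { return 1 + 2 * Elt; }
    unsigned subRegSlot(unsigned Elt) { return 1 + 2 * Elt + 1; }

    int main() {
      // A 16-element vector fills slots 1..32, hence 16 * 2 + 1 entries.
      assert(valueSlot(15) == 31 && subRegSlot(15) == 32);
      return 0;
    }
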
+
+
+bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
+ if (!ptr) {
+ return false;
+ }
+ Type *ptrType = ptr->getType();
+ return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
+}
+
+bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
+ return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
+ return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
+}
+
+bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
+ return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
+ return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
+ if (CbId == -1) {
+ return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
+ }
+ return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
+}
+
+bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
+ if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+ N->getMemoryVT().bitsLT(MVT::i32)) {
+ return true;
+ }
+ }
+ return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
+ return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
+ return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
+ return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
+ MachineMemOperand *MMO = N->getMemOperand();
+ if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+ if (MMO) {
+ const Value *V = MMO->getValue();
+ const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
+ if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
+ if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+ // Check to make sure we are not a constant pool load or a constant load
+ // that is marked as a private load
+ if (isCPLoad(N) || isConstantLoad(N, -1)) {
+ return false;
+ }
+ }
+ if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
+ && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
+ return true;
+ }
+ return false;
+}
+
+const char *AMDGPUDAGToDAGISel::getPassName() const {
+ return "AMDGPU DAG->DAG Pattern Instruction Selection";
+}
+
+#ifdef DEBUGTMP
+#undef INT64_C
+#endif
+#undef DEBUGTMP
+
+//===----------------------------------------------------------------------===//
+// Complex Patterns
+//===----------------------------------------------------------------------===//
+
+bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
+ SDValue& IntPtr) {
+ if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
+ IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
+ return true;
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
+ SDValue& BaseReg, SDValue &Offset) {
+ if (!dyn_cast<ConstantSDNode>(Addr)) {
+ BaseReg = Addr;
+ Offset = CurDAG->getIntPtrConstant(0, true);
+ return true;
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ ConstantSDNode * IMMOffset;
+
+ if (Addr.getOpcode() == ISD::ADD
+ && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ && isInt<16>(IMMOffset->getZExtValue())) {
+
+ Base = Addr.getOperand(0);
+ Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+ return true;
+ // If the pointer address is constant, we can move it to the offset field.
+ } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
+ && isInt<16>(IMMOffset->getZExtValue())) {
+ Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+ SDLoc(CurDAG->getEntryNode()),
+ AMDGPU::ZERO, MVT::i32);
+ Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+ return true;
+ }
+
+ // Default case, no offset
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ ConstantSDNode *C;
+
+ if ((C = dyn_cast<ConstantSDNode>(Addr))) {
+ Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
+ Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
+ (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
+ Base = Addr.getOperand(0);
+ Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+ } else {
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ }
+
+ return true;
+}
+
+SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
+ APInt Demanded = APInt(32, 0x00FFFFFF);
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
+ const TargetLowering *TLI = getTargetLowering();
+ if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
+ CurDAG->ReplaceAllUsesWith(Op, TLO.New);
+ CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
+ return SimplifyI24(TLO.New);
+ } else {
+ return Op;
+ }
+}
+
+bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
+
+ assert(Op.getValueType() == MVT::i32);
+
+ if (CurDAG->ComputeNumSignBits(Op) == 9) {
+ I24 = SimplifyI24(Op);
+ return true;
+ }
+ return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
+ APInt KnownZero;
+ APInt KnownOne;
+ CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+
+ assert (Op.getValueType() == MVT::i32);
+
+ // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
+ // i32. These smaller types are legal to use with the i24 instructions.
+ if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
+ Op.getOpcode() == ISD::ANY_EXTEND ||
+ ISD::isEXTLoad(Op.getNode())) {
+ U24 = SimplifyI24(Op);
+ return true;
+ }
+ return false;
+}
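
Why ComputeNumSignBits(Op) == 9 in SelectI24 above: a 32-bit value whose top 9 bits are all copies of the sign bit has at most 24 significant bits, so it is exactly representable on the signed 24-bit multiply path. A quick check of that range, as a sketch rather than patch code:

    #include <cassert>
    #include <cstdint>

    // A value fits the signed 24-bit multiplier when its top 9 bits are all
    // copies of the sign bit -- the property ComputeNumSignBits tests above.
    bool fitsInSigned24(int32_t X) {
      return X >= -(1 << 23) && X < (1 << 23);
    }

    int main() {
      assert(fitsInSigned24(0x007FFFFF));  // 2^23 - 1: 9 leading sign bits
      assert(!fitsInSigned24(0x00800000)); // 2^23: only 8 sign bits
      assert(fitsInSigned24(-(1 << 23)));  // -2^23 is still representable
      return 0;
    }
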
+
+void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
+ const AMDGPUTargetLowering& Lowering =
+ (*(const AMDGPUTargetLowering*)getTargetLowering());
+ bool IsModified = false;
+ do {
+ IsModified = false;
+ // Go over all selected nodes and try to fold them a bit more
+ for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+ E = CurDAG->allnodes_end(); I != E; ++I) {
+
+ SDNode *Node = I;
+
+ MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
+ if (!MachineNode)
+ continue;
+
+ SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
+ if (ResNode != Node) {
+ ReplaceUses(Node, ResNode);
+ IsModified = true;
+ }
+ }
+ CurDAG->RemoveDeadNodes();
+ } while (IsModified);
+}
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index a266df5..c4d75ff 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -14,16 +14,29 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
+#include "AMDGPU.h"
+#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
-#include "AMDILIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
+#include "AMDILIntrinsicInfo.h"
+#include "R600MachineFunctionInfo.h"
+#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/DataLayout.h"
using namespace llvm;
+static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() / 8, ArgFlags.getOrigAlign());
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+
+ return true;
+}
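
allocateStack above routes every kernel argument through CCState::AllocateStack, which hands out the next offset aligned to the argument's original alignment. A hedged sketch of that bookkeeping; this struct is a simplified stand-in for CCState, not its real interface:

    #include <cassert>

    // Each argument takes SizeInBits / 8 bytes at the next offset aligned
    // to its original alignment, mirroring the allocateStack logic above.
    struct StackState {
      unsigned NextOffset = 0;
      unsigned allocate(unsigned SizeInBits, unsigned Align) {
        unsigned Offset = (NextOffset + Align - 1) / Align * Align;
        NextOffset = Offset + SizeInBits / 8;
        return Offset;
      }
    };

    int main() {
      StackState S;
      assert(S.allocate(32, 4) == 0); // i32 at offset 0
      assert(S.allocate(8, 1) == 4);  // i8 at offset 4
      assert(S.allocate(32, 4) == 8); // next i32 aligned up to 8
      return 0;
    }
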
#include "AMDGPUGenCallingConv.inc"
@@ -45,26 +58,171 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FABS, MVT::f32, Legal);
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FRINT, MVT::f32, Legal);
+ setOperationAction(ISD::FROUND, MVT::f32, Legal);
+
+ // The hardware supports ROTR, but not ROTL
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
setOperationAction(ISD::STORE, MVT::f32, Promote);
AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
+ setOperationAction(ISD::STORE, MVT::v2f32, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
+
setOperationAction(ISD::STORE, MVT::v4f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
+ setOperationAction(ISD::STORE, MVT::v8f32, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
+
+ setOperationAction(ISD::STORE, MVT::v16f32, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
+
+ setOperationAction(ISD::STORE, MVT::f64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
+
+ // Custom lowering of vector stores is required for local address space
+ // stores.
+ setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+ // XXX: Native v2i32 local address space stores are possible, but not
+ // currently implemented.
+ setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+
+ setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
+ setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
+ setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
+ // XXX: This can be changed to Custom, once ExpandVectorStores can
+ // handle 64-bit stores.
+ setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
+
setOperationAction(ISD::LOAD, MVT::f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
+ setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
+
setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
+ setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
+
+ setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
+
+ setOperationAction(ISD::LOAD, MVT::f64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
+
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);
+
+ setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
+ setOperationAction(ISD::FNEG, MVT::v4f32, Expand);
+
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+
setOperationAction(ISD::MUL, MVT::i64, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);
+
+ static const MVT::SimpleValueType IntTypes[] = {
+ MVT::v2i32, MVT::v4i32
+ };
+ const size_t NumIntTypes = array_lengthof(IntTypes);
+
+ for (unsigned int x = 0; x < NumIntTypes; ++x) {
+ MVT::SimpleValueType VT = IntTypes[x];
+ // Expand the following operations for the current type by default.
+ setOperationAction(ISD::ADD, VT, Expand);
+ setOperationAction(ISD::AND, VT, Expand);
+ setOperationAction(ISD::FP_TO_SINT, VT, Expand);
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ setOperationAction(ISD::MUL, VT, Expand);
+ setOperationAction(ISD::OR, VT, Expand);
+ setOperationAction(ISD::SHL, VT, Expand);
+ setOperationAction(ISD::SINT_TO_FP, VT, Expand);
+ setOperationAction(ISD::SRL, VT, Expand);
+ setOperationAction(ISD::SRA, VT, Expand);
+ setOperationAction(ISD::SUB, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::UINT_TO_FP, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ setOperationAction(ISD::XOR, VT, Expand);
+ }
+
+ static const MVT::SimpleValueType FloatTypes[] = {
+ MVT::v2f32, MVT::v4f32
+ };
+ const size_t NumFloatTypes = array_lengthof(FloatTypes);
+
+ for (unsigned int x = 0; x < NumFloatTypes; ++x) {
+ MVT::SimpleValueType VT = FloatTypes[x];
+ setOperationAction(ISD::FABS, VT, Expand);
+ setOperationAction(ISD::FADD, VT, Expand);
+ setOperationAction(ISD::FDIV, VT, Expand);
+ setOperationAction(ISD::FFLOOR, VT, Expand);
+ setOperationAction(ISD::FMUL, VT, Expand);
+ setOperationAction(ISD::FRINT, VT, Expand);
+ setOperationAction(ISD::FSQRT, VT, Expand);
+ setOperationAction(ISD::FSUB, VT, Expand);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Target Information
+//===----------------------------------------------------------------------===//
+
+MVT AMDGPUTargetLowering::getVectorIdxTy() const {
+ return MVT::i32;
+}
+
+bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
+ EVT CastTy) const {
+ if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
+ return true;
+
+ unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
+ unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();
+
+ return ((LScalarSize <= CastScalarSize) ||
+ (CastScalarSize >= 32) ||
+ (LScalarSize < 32));
+}
+
+//===---------------------------------------------------------------------===//
+// Target Properties
+//===---------------------------------------------------------------------===//
+
+bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return VT == MVT::f32;
+}
+
+bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return VT == MVT::f32;
}
//===---------------------------------------------------------------------===//
@@ -83,7 +241,7 @@ SDValue AMDGPUTargetLowering::LowerReturn(
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}
@@ -105,16 +263,104 @@ SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
// AMDGPU DAG lowering
+ case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
+ case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
+ case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
}
return Op;
}
+SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
+ SDValue Op,
+ SelectionDAG &DAG) const {
+
+ const DataLayout *TD = getTargetMachine().getDataLayout();
+ GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
+
+ assert(G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS);
+ // XXX: What does the value of G->getOffset() mean?
+ assert(G->getOffset() == 0 &&
+ "Do not know what to do with an non-zero offset");
+
+ const GlobalValue *GV = G->getGlobal();
+
+ unsigned Offset;
+ if (MFI->LocalMemoryObjects.count(GV) == 0) {
+ uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
+ Offset = MFI->LDSSize;
+ MFI->LocalMemoryObjects[GV] = Offset;
+ // XXX: Account for alignment?
+ MFI->LDSSize += Size;
+ } else {
+ Offset = MFI->LocalMemoryObjects[GV];
+ }
+
+ return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
+}
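
LowerGlobalAddress above lazily assigns each local-address-space global an LDS offset: the first use gets the current LDSSize, which then grows by the global's allocation size (alignment is not yet handled, per the XXX note). A minimal model of that bookkeeping, as an illustrative sketch:

    #include <cassert>
    #include <map>

    // First query for a global records the current LDSSize as its offset
    // and bumps LDSSize; later queries reuse the recorded offset.
    struct LDSState {
      std::map<const void *, unsigned> Offsets;
      unsigned LDSSize = 0;
      unsigned offsetFor(const void *GV, unsigned Size) {
        auto It = Offsets.find(GV);
        if (It != Offsets.end())
          return It->second;
        unsigned Offset = LDSSize;
        Offsets[GV] = Offset;
        LDSSize += Size;
        return Offset;
      }
    };

    int main() {
      LDSState S;
      int A, B; // stand-ins for two global values
      assert(S.offsetFor(&A, 4) == 0);
      assert(S.offsetFor(&B, 16) == 4);
      assert(S.offsetFor(&A, 4) == 0); // second query reuses the offset
      assert(S.LDSSize == 20);
      return 0;
    }
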
+
+void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Args,
+ unsigned Start,
+ unsigned Count) const {
+ EVT VT = Op.getValueType();
+ for (unsigned i = Start, e = Start + Count; i != e; ++i) {
+ Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
+ VT.getVectorElementType(),
+ Op, DAG.getConstant(i, MVT::i32)));
+ }
+}
+
+SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
+ SelectionDAG &DAG) const {
+ SmallVector<SDValue, 8> Args;
+ SDValue A = Op.getOperand(0);
+ SDValue B = Op.getOperand(1);
+
+ ExtractVectorElements(A, DAG, Args, 0,
+ A.getValueType().getVectorNumElements());
+ ExtractVectorElements(B, DAG, Args, 0,
+ B.getValueType().getVectorNumElements());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
+ &Args[0], Args.size());
+}
+
+SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ SmallVector<SDValue, 8> Args;
+ EVT VT = Op.getValueType();
+ unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
+ VT.getVectorNumElements());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
+ &Args[0], Args.size());
+}
+
+SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ const AMDGPUFrameLowering *TFL =
+ static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());
+
+ FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
+ assert(FIN);
+
+ unsigned FrameIndex = FIN->getIndex();
+ unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
+ return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
+ Op.getValueType());
+}
+
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
switch (IntrinsicID) {
@@ -154,7 +400,7 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
Op.getOperand(1));
@@ -166,7 +412,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
DAG.getConstantFP(1.0f, MVT::f32),
@@ -181,7 +427,7 @@ SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
@@ -238,11 +484,126 @@ SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
return Op;
}
+SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
+ SelectionDAG &DAG) const {
+ LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
+ EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
+ EVT EltVT = Op.getValueType().getVectorElementType();
+ EVT PtrVT = Load->getBasePtr().getValueType();
+ unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
+ SmallVector<SDValue, 8> Loads;
+ SDLoc SL(Op);
+
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
+ DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
+ Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
+ Load->getChain(), Ptr,
+ MachinePointerInfo(Load->getMemOperand()->getValue()),
+ MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
+ Load->getAlignment()));
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), &Loads[0],
+ Loads.size());
+}
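
SplitVectorLoad above scalarizes a vector load: element i is loaded from the base pointer plus i times the in-memory element size. Worked offsets for a v4i32 load, as a sketch:

    #include <cassert>

    // Per-element byte offset used by SplitVectorLoad above: element i of
    // a vector whose memory element type is MemEltBits wide lives at
    // BasePtr + i * MemEltBits / 8.
    unsigned elementOffset(unsigned i, unsigned MemEltBits) {
      return i * (MemEltBits / 8);
    }

    int main() {
      // A v4i32 load splits into four i32 loads at offsets 0, 4, 8, 12.
      for (unsigned i = 0; i < 4; ++i)
        assert(elementOffset(i, 32) == i * 4);
      return 0;
    }
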
+
+SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
+ SelectionDAG &DAG) const {
+ StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
+ EVT MemVT = Store->getMemoryVT();
+ unsigned MemBits = MemVT.getSizeInBits();
+ // Byte stores are really expensive, so if possible, try to pack a
+ // 32-bit vector truncating store into an i32 store.
+ // XXX: We could also optimize other vector bit widths.
+ if (!MemVT.isVector() || MemBits > 32) {
+ return SDValue();
+ }
+
+ SDLoc DL(Op);
+ const SDValue &Value = Store->getValue();
+ EVT VT = Value.getValueType();
+ const SDValue &Ptr = Store->getBasePtr();
+ EVT MemEltVT = MemVT.getVectorElementType();
+ unsigned MemEltBits = MemEltVT.getSizeInBits();
+ unsigned MemNumElements = MemVT.getVectorNumElements();
+ EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
+ SDValue Mask;
+ switch(MemEltBits) {
+ case 8:
+ Mask = DAG.getConstant(0xFF, PackedVT);
+ break;
+ case 16:
+ Mask = DAG.getConstant(0xFFFF, PackedVT);
+ break;
+ default:
+ llvm_unreachable("Cannot lower this vector store");
+ }
+ SDValue PackedValue;
+ for (unsigned i = 0; i < MemNumElements; ++i) {
+ EVT ElemVT = VT.getVectorElementType();
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
+ DAG.getConstant(i, MVT::i32));
+ Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
+ Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
+ SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
+ Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
+ if (i == 0) {
+ PackedValue = Elt;
+ } else {
+ PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
+ }
+ }
+ return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
+ MachinePointerInfo(Store->getMemOperand()->getValue()),
+ Store->isVolatile(), Store->isNonTemporal(),
+ Store->getAlignment());
+}
+
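The AND/SHL/OR chain above is the usual mask-shift-or packing. A scalar sketch of the v4i8 case (MemEltBits == 8, so Mask == 0xFF and the whole vector fits in one dword):

    #include <cstdint>

    // Sketch: pack four 8-bit elements into one i32 so the vector
    // truncating store becomes a single dword store.
    uint32_t packV4I8(const uint32_t Elts[4]) {
      uint32_t Packed = 0;
      for (unsigned i = 0; i != 4; ++i)
        Packed |= (Elts[i] & 0xFFu) << (8 * i);
      return Packed;  // written with one i32 store
    }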
+SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
+ SelectionDAG &DAG) const {
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
+ EVT EltVT = Store->getValue().getValueType().getVectorElementType();
+ EVT PtrVT = Store->getBasePtr().getValueType();
+ unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
+ SDLoc SL(Op);
+
+ SmallVector<SDValue, 8> Chains;
+
+ for (unsigned i = 0, e = NumElts; i != e; ++i) {
+ SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
+ Store->getValue(), DAG.getConstant(i, MVT::i32));
+ SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
+ Store->getBasePtr(),
+ DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
+ PtrVT));
+ Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
+ MachinePointerInfo(Store->getMemOperand()->getValue()),
+ MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
+ Store->getAlignment()));
+ }
+ return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
+}
+
+SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
+ if (Result.getNode()) {
+ return Result;
+ }
+
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+ Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
+ Store->getValue().getValueType().isVector()) {
+ return SplitVectorStore(Op, DAG);
+ }
+ return SDValue();
+}
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue Num = Op.getOperand(0);
@@ -295,13 +656,13 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
DAG.getConstant(-1, VT),
DAG.getConstant(0, VT),
- ISD::SETGE);
- // Remainder_GE_Zero = (Remainder >= 0 ? -1 : 0)
- SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Remainder,
- DAG.getConstant(0, VT),
+ ISD::SETUGE);
+ // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
+ SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
+ Num_S_Remainder,
DAG.getConstant(-1, VT),
DAG.getConstant(0, VT),
- ISD::SETGE);
+ ISD::SETUGE);
// Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
Remainder_GE_Zero);
@@ -345,10 +706,62 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
return DAG.getMergeValues(Ops, 2, DL);
}
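These compares must be unsigned: Remainder and Num are unsigned quantities here, and a signed SETGE misreads values with the high bit set. A scalar sketch of the correction step, assuming (as in the part of this function elided between the hunks above) that Num_S_Remainder is Quotient * Den:

    #include <cstdint>

    // Sketch: fix up an estimated quotient that may be off by one in
    // either direction, using the two unsigned compares from above.
    void fixupUDivRem(uint32_t Num, uint32_t Den,
                      uint32_t &Quotient, uint32_t &Remainder) {
      uint32_t Num_S_Remainder = Quotient * Den;  // elided above
      Remainder = Num - Num_S_Remainder;
      bool Remainder_GE_Den  = Remainder >= Den;        // ISD::SETUGE
      bool Remainder_GE_Zero = Num >= Num_S_Remainder;  // no wraparound
      if (Remainder_GE_Den && Remainder_GE_Zero) {      // Tmp1
        Quotient += 1;
        Remainder -= Den;
      } else if (!Remainder_GE_Zero) {
        Quotient -= 1;
        Remainder += Den;
      }
    }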
+SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue S0 = Op.getOperand(0);
+ SDLoc DL(Op);
+ if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
+ return SDValue();
+
+ // f32 uint_to_fp i64
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
+ DAG.getConstant(0, MVT::i32));
+ SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
+ DAG.getConstant(1, MVT::i32));
+ SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
+ FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
+ DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
+ return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
+}
+
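In scalar form, the expansion above is simply: convert each 32-bit half separately and scale the high half by 2^32, since there is no native i64-to-f32 conversion. A minimal sketch:

    #include <cstdint>

    // Sketch of the f32 uint_to_fp i64 expansion built above.
    float uint64ToFloat(uint64_t V) {
      float FloatLo = (float)(uint32_t)V;         // EXTRACT_ELEMENT 0
      float FloatHi = (float)(uint32_t)(V >> 32); // EXTRACT_ELEMENT 1
      return FloatLo + FloatHi * 4294967296.0f;   // FMUL by 2^32, FADD
    }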
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
+void AMDGPUTargetLowering::getOriginalFunctionArgs(
+ SelectionDAG &DAG,
+ const Function *F,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SmallVectorImpl<ISD::InputArg> &OrigIns) const {
+
+ for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
+ if (Ins[i].ArgVT == Ins[i].VT) {
+ OrigIns.push_back(Ins[i]);
+ continue;
+ }
+
+ EVT VT;
+ if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
+ // Vector has been split into scalars.
+ VT = Ins[i].ArgVT.getVectorElementType();
+ } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
+ Ins[i].ArgVT.getVectorElementType() !=
+ Ins[i].VT.getVectorElementType()) {
+ // Vector elements have been promoted
+ VT = Ins[i].ArgVT;
+ } else {
+ // Vector has been split into smaller vectors.
+ VT = Ins[i].VT;
+ }
+
+ ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
+ Ins[i].OrigArgIndex, Ins[i].PartOffset);
+ OrigIns.push_back(Arg);
+ }
+}
+
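Concretely (hypothetical type choices for illustration): a <3 x i32> argument that the builder split into three i32 parts takes the first branch, so VT becomes the element type i32; a <4 x i8> argument whose elements were promoted to <4 x i32> takes the second branch, so VT is reset to the original <4 x i8>; and an <8 x i32> argument split into two <4 x i32> halves takes the third branch, so VT stays <4 x i32>.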
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
return CFP->isExactlyValue(1.0);
@@ -410,5 +823,13 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(CONST_ADDRESS)
NODE_NAME_CASE(REGISTER_LOAD)
NODE_NAME_CASE(REGISTER_STORE)
+ NODE_NAME_CASE(LOAD_CONSTANT)
+ NODE_NAME_CASE(LOAD_INPUT)
+ NODE_NAME_CASE(SAMPLE)
+ NODE_NAME_CASE(SAMPLEB)
+ NODE_NAME_CASE(SAMPLED)
+ NODE_NAME_CASE(SAMPLEL)
+ NODE_NAME_CASE(STORE_MSKOR)
+ NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
}
}
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index c2a79ea..2dfd3cf 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -20,12 +20,25 @@
namespace llvm {
+class AMDGPUMachineFunction;
class MachineRegisterInfo;
class AMDGPUTargetLowering : public TargetLowering {
private:
+ void ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Args,
+ unsigned Start, unsigned Count) const;
+ SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ /// \brief Lower vector stores by merging the vector elements into an integer
+ /// of the same bitwidth.
+ SDValue MergeVectorStore(const SDValue &Op, SelectionDAG &DAG) const;
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
protected:
@@ -33,23 +46,43 @@ protected:
/// MachineFunction.
///
/// \returns a RegisterSDNode representing Reg.
- SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC,
- unsigned Reg, EVT VT) const;
-
+ virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
+ const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const;
+ SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
+ SelectionDAG &DAG) const;
+ /// \brief Split a vector load into multiple scalar loads.
+ SDValue SplitVectorLoad(const SDValue &Op, SelectionDAG &DAG) const;
+ /// \brief Split a vector store into multiple scalar stores.
+ /// \returns The resulting chain.
+ SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
bool isHWTrueValue(SDValue Op) const;
bool isHWFalseValue(SDValue Op) const;
+ /// The SelectionDAGBuilder will automatically promote function arguments
+ /// with illegal types. However, this does not work for the AMDGPU targets
+ /// since the function arguments are stored in memory as these illegal types.
+ /// In order to handle this properly we need to get the original types
+ /// from the LLVM IR Function and fix up the ISD::InputArg values before
+ /// passing them to AnalyzeFormalArguments().
+ void getOriginalFunctionArgs(SelectionDAG &DAG,
+ const Function *F,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SmallVectorImpl<ISD::InputArg> &OrigIns) const;
void AnalyzeFormalArguments(CCState &State,
const SmallVectorImpl<ISD::InputArg> &Ins) const;
public:
AMDGPUTargetLowering(TargetMachine &TM);
+ virtual bool isFAbsFree(EVT VT) const;
+ virtual bool isFNegFree(EVT VT) const;
+ virtual MVT getVectorIdxTy() const;
+ virtual bool isLoadBitCastBeneficial(EVT, EVT) const LLVM_OVERRIDE;
virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const;
+ SDLoc DL, SelectionDAG &DAG) const;
virtual SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
CLI.Callee.dump();
@@ -115,10 +148,10 @@ enum {
RET_FLAG,
BRANCH_COND,
// End AMDIL ISD Opcodes
- BITALIGN,
- BUFFER_STORE,
DWORDADDR,
FRACT,
+ COS_HW,
+ SIN_HW,
FMAX,
SMAX,
UMAX,
@@ -126,10 +159,21 @@ enum {
SMIN,
UMIN,
URECIP,
+ DOT4,
+ TEXTURE_FETCH,
EXPORT,
CONST_ADDRESS,
REGISTER_LOAD,
REGISTER_STORE,
+ LOAD_INPUT,
+ SAMPLE,
+ SAMPLEB,
+ SAMPLED,
+ SAMPLEL,
+ FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ STORE_MSKOR,
+ LOAD_CONSTANT,
+ TBUFFER_STORE_FORMAT,
LAST_AMDGPU_ISD_NUMBER
};
diff --git a/lib/Target/R600/AMDGPUIndirectAddressing.cpp b/lib/Target/R600/AMDGPUIndirectAddressing.cpp
deleted file mode 100644
index ed6c8ec..0000000
--- a/lib/Target/R600/AMDGPUIndirectAddressing.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-//===-- AMDGPUIndirectAddressing.cpp - Indirect Addressing Support --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-///
-/// Instructions can use indirect addressing to index the register file as if it
-/// were memory. This pass lowers RegisterLoad and RegisterStore instructions
-/// to either a COPY or a MOV that uses indirect addressing.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUIndirectAddressingPass : public MachineFunctionPass {
-
-private:
- static char ID;
- const AMDGPUInstrInfo *TII;
-
- bool regHasExplicitDef(MachineRegisterInfo &MRI, unsigned Reg) const;
-
-public:
- AMDGPUIndirectAddressingPass(TargetMachine &tm) :
- MachineFunctionPass(ID),
- TII(static_cast<const AMDGPUInstrInfo*>(tm.getInstrInfo()))
- { }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- const char *getPassName() const { return "R600 Handle indirect addressing"; }
-
-};
-
-} // End anonymous namespace
-
-char AMDGPUIndirectAddressingPass::ID = 0;
-
-FunctionPass *llvm::createAMDGPUIndirectAddressingPass(TargetMachine &tm) {
- return new AMDGPUIndirectAddressingPass(tm);
-}
-
-bool AMDGPUIndirectAddressingPass::runOnMachineFunction(MachineFunction &MF) {
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- int IndirectBegin = TII->getIndirectIndexBegin(MF);
- int IndirectEnd = TII->getIndirectIndexEnd(MF);
-
- if (IndirectBegin == -1) {
- // No indirect addressing, we can skip this pass
- assert(IndirectEnd == -1);
- return false;
- }
-
- // The map keeps track of the indirect address that is represented by
- // each virtual register. The key is the register and the value is the
- // indirect address it uses.
- std::map<unsigned, unsigned> RegisterAddressMap;
-
- // First pass - Lower all of the RegisterStore instructions and track which
- // registers are live.
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- // This map keeps track of the current live indirect registers.
- // The key is the address and the value is the register
- std::map<unsigned, unsigned> LiveAddressRegisterMap;
- MachineBasicBlock &MBB = *BB;
-
- for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
- I != MBB.end(); I = Next) {
- Next = llvm::next(I);
- MachineInstr &MI = *I;
-
- if (!TII->isRegisterStore(MI)) {
- continue;
- }
-
- // Lower RegisterStore
-
- unsigned RegIndex = MI.getOperand(2).getImm();
- unsigned Channel = MI.getOperand(3).getImm();
- unsigned Address = TII->calculateIndirectAddress(RegIndex, Channel);
- const TargetRegisterClass *IndirectStoreRegClass =
- TII->getIndirectAddrStoreRegClass(MI.getOperand(0).getReg());
-
- if (MI.getOperand(1).getReg() == AMDGPU::INDIRECT_BASE_ADDR) {
- // Direct register access.
- unsigned DstReg = MRI.createVirtualRegister(IndirectStoreRegClass);
-
- BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::COPY), DstReg)
- .addOperand(MI.getOperand(0));
-
- RegisterAddressMap[DstReg] = Address;
- LiveAddressRegisterMap[Address] = DstReg;
- } else {
- // Indirect register access.
- MachineInstrBuilder MOV = TII->buildIndirectWrite(BB, I,
- MI.getOperand(0).getReg(), // Value
- Address,
- MI.getOperand(1).getReg()); // Offset
- for (int i = IndirectBegin; i <= IndirectEnd; ++i) {
- unsigned Addr = TII->calculateIndirectAddress(i, Channel);
- unsigned DstReg = MRI.createVirtualRegister(IndirectStoreRegClass);
- MOV.addReg(DstReg, RegState::Define | RegState::Implicit);
- RegisterAddressMap[DstReg] = Addr;
- LiveAddressRegisterMap[Addr] = DstReg;
- }
- }
- MI.eraseFromParent();
- }
-
- // Update the live-ins of the successor blocks
- for (MachineBasicBlock::succ_iterator Succ = MBB.succ_begin(),
- SuccEnd = MBB.succ_end();
- SuccEnd != Succ; ++Succ) {
- std::map<unsigned, unsigned>::const_iterator Key, KeyEnd;
- for (Key = LiveAddressRegisterMap.begin(),
- KeyEnd = LiveAddressRegisterMap.end(); KeyEnd != Key; ++Key) {
- (*Succ)->addLiveIn(Key->second);
- }
- }
- }
-
- // Second pass - Lower the RegisterLoad instructions
- for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
- BB != BB_E; ++BB) {
- // Key is the address and the value is the register
- std::map<unsigned, unsigned> LiveAddressRegisterMap;
- MachineBasicBlock &MBB = *BB;
-
- MachineBasicBlock::livein_iterator LI = MBB.livein_begin();
- while (LI != MBB.livein_end()) {
- std::vector<unsigned> PhiRegisters;
-
- // Make sure this live in is used for indirect addressing
- if (RegisterAddressMap.find(*LI) == RegisterAddressMap.end()) {
- ++LI;
- continue;
- }
-
- unsigned Address = RegisterAddressMap[*LI];
- LiveAddressRegisterMap[Address] = *LI;
- PhiRegisters.push_back(*LI);
-
- // Check if there are other live in registers which map to the same
- // indirect address.
- for (MachineBasicBlock::livein_iterator LJ = llvm::next(LI),
- LE = MBB.livein_end();
- LJ != LE; ++LJ) {
- unsigned Reg = *LJ;
- if (RegisterAddressMap.find(Reg) == RegisterAddressMap.end()) {
- continue;
- }
-
- if (RegisterAddressMap[Reg] == Address) {
- PhiRegisters.push_back(Reg);
- }
- }
-
- if (PhiRegisters.size() == 1) {
- // We don't need to insert a Phi instruction, so we can just add the
- // registers to the live list for the block.
- LiveAddressRegisterMap[Address] = *LI;
- MBB.removeLiveIn(*LI);
- } else {
- // We need to insert a PHI, because we have the same address being
- // written in multiple predecessor blocks.
- const TargetRegisterClass *PhiDstClass =
- TII->getIndirectAddrStoreRegClass(*(PhiRegisters.begin()));
- unsigned PhiDstReg = MRI.createVirtualRegister(PhiDstClass);
- MachineInstrBuilder Phi = BuildMI(MBB, MBB.begin(),
- MBB.findDebugLoc(MBB.begin()),
- TII->get(AMDGPU::PHI), PhiDstReg);
-
- for (std::vector<unsigned>::const_iterator RI = PhiRegisters.begin(),
- RE = PhiRegisters.end();
- RI != RE; ++RI) {
- unsigned Reg = *RI;
- MachineInstr *DefInst = MRI.getVRegDef(Reg);
- assert(DefInst);
- MachineBasicBlock *RegBlock = DefInst->getParent();
- Phi.addReg(Reg);
- Phi.addMBB(RegBlock);
- MBB.removeLiveIn(Reg);
- }
- RegisterAddressMap[PhiDstReg] = Address;
- LiveAddressRegisterMap[Address] = PhiDstReg;
- }
- LI = MBB.livein_begin();
- }
-
- for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
- I != MBB.end(); I = Next) {
- Next = llvm::next(I);
- MachineInstr &MI = *I;
-
- if (!TII->isRegisterLoad(MI)) {
- if (MI.getOpcode() == AMDGPU::PHI) {
- continue;
- }
- // Check for indirect register defs
- for (unsigned OpIdx = 0, NumOperands = MI.getNumOperands();
- OpIdx < NumOperands; ++OpIdx) {
- MachineOperand &MO = MI.getOperand(OpIdx);
- if (MO.isReg() && MO.isDef() &&
- RegisterAddressMap.find(MO.getReg()) != RegisterAddressMap.end()) {
- unsigned Reg = MO.getReg();
- unsigned LiveAddress = RegisterAddressMap[Reg];
- // Chain the live-ins
- if (LiveAddressRegisterMap.find(LiveAddress) !=
- RegisterAddressMap.end()) {
- MI.addOperand(MachineOperand::CreateReg(
- LiveAddressRegisterMap[LiveAddress],
- false, // isDef
- true, // isImp
- true)); // isKill
- }
- LiveAddressRegisterMap[LiveAddress] = Reg;
- }
- }
- continue;
- }
-
- const TargetRegisterClass *SuperIndirectRegClass =
- TII->getSuperIndirectRegClass();
- const TargetRegisterClass *IndirectLoadRegClass =
- TII->getIndirectAddrLoadRegClass();
- unsigned IndirectReg = MRI.createVirtualRegister(SuperIndirectRegClass);
-
- unsigned RegIndex = MI.getOperand(2).getImm();
- unsigned Channel = MI.getOperand(3).getImm();
- unsigned Address = TII->calculateIndirectAddress(RegIndex, Channel);
-
- if (MI.getOperand(1).getReg() == AMDGPU::INDIRECT_BASE_ADDR) {
- // Direct register access
- unsigned Reg = LiveAddressRegisterMap[Address];
- unsigned AddrReg = IndirectLoadRegClass->getRegister(Address);
-
- if (regHasExplicitDef(MRI, Reg)) {
- // If the register we are reading from has an explicit def, then that
- // means it was written via a direct register access (i.e. COPY
- // or other instruction that doesn't use indirect addressing). In
- // this case we know where the value has been stored, so we can just
- // issue a copy.
- BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::COPY),
- MI.getOperand(0).getReg())
- .addReg(Reg);
- } else {
- // If the register we are reading has an implicit def, then that
- // means it was written by an indirect register access (i.e. an
- // instruction that uses indirect addressing).
- BuildMI(MBB, I, MBB.findDebugLoc(I), TII->get(AMDGPU::COPY),
- MI.getOperand(0).getReg())
- .addReg(AddrReg)
- .addReg(Reg, RegState::Implicit);
- }
- } else {
- // Indirect register access
-
- // Note on REG_SEQUENCE instructions: You can't actually use the register
- // it defines unless you have an instruction that takes the defined
- // register class as an operand.
-
- MachineInstrBuilder Sequence = BuildMI(MBB, I, MBB.findDebugLoc(I),
- TII->get(AMDGPU::REG_SEQUENCE),
- IndirectReg);
- for (int i = IndirectBegin; i <= IndirectEnd; ++i) {
- unsigned Addr = TII->calculateIndirectAddress(i, Channel);
- if (LiveAddressRegisterMap.find(Addr) == LiveAddressRegisterMap.end()) {
- continue;
- }
- unsigned Reg = LiveAddressRegisterMap[Addr];
-
- // We only need to use REG_SEQUENCE for explicit defs, since the
- // register coalescer won't do anything with the implicit defs.
- if (!regHasExplicitDef(MRI, Reg)) {
- continue;
- }
-
- // Insert a REG_SEQUENCE instruction to force the register allocator
- // to allocate the virtual register to the correct physical register.
- Sequence.addReg(LiveAddressRegisterMap[Addr]);
- Sequence.addImm(TII->getRegisterInfo().getIndirectSubReg(Addr));
- }
- MachineInstrBuilder Mov = TII->buildIndirectRead(BB, I,
- MI.getOperand(0).getReg(), // Value
- Address,
- MI.getOperand(1).getReg()); // Offset
-
-
-
- Mov.addReg(IndirectReg, RegState::Implicit | RegState::Kill);
- Mov.addReg(LiveAddressRegisterMap[Address], RegState::Implicit);
-
- }
- MI.eraseFromParent();
- }
- }
- return false;
-}
-
-bool AMDGPUIndirectAddressingPass::regHasExplicitDef(MachineRegisterInfo &MRI,
- unsigned Reg) const {
- MachineInstr *DefInstr = MRI.getVRegDef(Reg);
-
- if (!DefInstr) {
- return false;
- }
-
- if (DefInstr->getOpcode() == AMDGPU::PHI) {
- bool Explicit = false;
- for (MachineInstr::const_mop_iterator I = DefInstr->operands_begin(),
- E = DefInstr->operands_end();
- I != E; ++I) {
- const MachineOperand &MO = *I;
- if (!MO.isReg() || MO.isDef()) {
- continue;
- }
-
- Explicit = Explicit || regHasExplicitDef(MRI, MO.getReg());
- }
- return Explicit;
- }
-
- return DefInstr->getOperand(0).isReg() &&
- DefInstr->getOperand(0).getReg() == Reg;
-}
diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/R600/AMDGPUInstrInfo.cpp
index 30f736c..4f7084b 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/lib/Target/R600/AMDGPUInstrInfo.cpp
@@ -16,19 +16,23 @@
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
-#include "AMDIL.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
+#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
using namespace llvm;
+
+// Pin the vtable to this file.
+void AMDGPUInstrInfo::anchor() {}
+
AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
- : AMDGPUGenInstrInfo(0,0), RI(tm, *this), TM(tm) { }
+ : AMDGPUGenInstrInfo(-1,-1), RI(tm), TM(tm) { }
const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
return RI;
@@ -99,27 +103,6 @@ bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
return false;
}
-MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
- MachineBasicBlock::iterator tmp = MBB->end();
- if (!MBB->size()) {
- return MBB->end();
- }
- while (--tmp) {
- if (tmp->getOpcode() == AMDGPU::ENDLOOP
- || tmp->getOpcode() == AMDGPU::ENDIF
- || tmp->getOpcode() == AMDGPU::ELSE) {
- if (tmp == MBB->begin()) {
- return tmp;
- } else {
- continue;
- }
- } else {
- return ++tmp;
- }
- }
- return MBB->end();
-}
-
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
@@ -139,6 +122,55 @@ AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
assert(!"Not Implemented");
}
+bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ int OffsetOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::addr);
+ // addr is a custom operand with multiple MI operands, and only the
+ // first MI operand is given a name.
+ int RegOpIdx = OffsetOpIdx + 1;
+ int ChanOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::chan);
+
+ if (isRegisterLoad(*MI)) {
+ int DstOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
+ unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
+ unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+ unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
+ if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+ buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
+ getIndirectAddrRegClass()->getRegister(Address));
+ } else {
+ buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
+ Address, OffsetReg);
+ }
+ } else if (isRegisterStore(*MI)) {
+ int ValOpIdx =
+ AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::val);
+ unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
+ unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
+ unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+ unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
+ if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+ buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
+ MI->getOperand(ValOpIdx).getReg());
+ } else {
+ buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
+ calculateIndirectAddress(RegIndex, Channel),
+ OffsetReg);
+ }
+ } else {
+ return false;
+ }
+
+ MBB->erase(MI);
+ return true;
+}
+
MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
@@ -244,6 +276,57 @@ bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
+int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int Offset = -1;
+
+ if (MFI->getNumObjects() == 0) {
+ return -1;
+ }
+
+ if (MRI.livein_empty()) {
+ return 0;
+ }
+
+ const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
+ for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
+ LE = MRI.livein_end();
+ LI != LE; ++LI) {
+ unsigned Reg = LI->first;
+ if (TargetRegisterInfo::isVirtualRegister(Reg) ||
+ !IndirectRC->contains(Reg))
+ continue;
+
+ unsigned RegIndex;
+ unsigned RegEnd;
+ for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
+ ++RegIndex) {
+ if (IndirectRC->getRegister(RegIndex) == Reg)
+ break;
+ }
+ Offset = std::max(Offset, (int)RegIndex);
+ }
+
+ return Offset + 1;
+}
+
+int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
+ int Offset = 0;
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Variable sized objects are not supported
+ assert(!MFI->hasVarSizedObjects());
+
+ if (MFI->getNumObjects() == 0) {
+ return -1;
+ }
+
+ Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);
+
+ return getIndirectIndexBegin(MF) + Offset;
+}
+
void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
DebugLoc DL) const {
@@ -265,3 +348,12 @@ void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
}
}
}
+
+int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
+ switch (Channels) {
+ default: return Opcode;
+ case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
+ case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
+ case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
+ }
+}
diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/R600/AMDGPUInstrInfo.h
index 3909e4e..ce5b58c 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/lib/Target/R600/AMDGPUInstrInfo.h
@@ -23,6 +23,7 @@
#define GET_INSTRINFO_HEADER
#define GET_INSTRINFO_ENUM
+#define GET_INSTRINFO_OPERAND_ENUM
#include "AMDGPUGenInstrInfo.inc"
#define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
@@ -42,6 +43,7 @@ private:
const AMDGPURegisterInfo RI;
bool getNextBranchInstr(MachineBasicBlock::iterator &iter,
MachineBasicBlock &MBB) const;
+ virtual void anchor();
protected:
TargetMachine &TM;
public:
@@ -86,6 +88,8 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
+
protected:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
@@ -96,6 +100,14 @@ protected:
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const;
+ /// \returns the smallest register index that will be accessed by an indirect
+ /// read or write or -1 if indirect addressing is not used by this program.
+ virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
+
+ /// \returns the largest register index that will be accessed by an indirect
+ /// read or write or -1 if indirect addressing is not used by this program.
+ virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
+
public:
bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;
@@ -138,19 +150,9 @@ public:
// Pure virtual functions to be implemented by sub-classes.
//===---------------------------------------------------------------------===//
- virtual MachineInstr* getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const = 0;
virtual unsigned getIEQOpcode() const = 0;
virtual bool isMov(unsigned opcode) const = 0;
- /// \returns the smallest register index that will be accessed by an indirect
- /// read or write or -1 if indirect addressing is not used by this program.
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const = 0;
-
- /// \returns the largest register index that will be accessed by an indirect
- /// read or write or -1 if indirect addressing is not used by this program.
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const = 0;
-
/// \brief Calculate the "Indirect Address" for the given \p RegIndex and
/// \p Channel
///
@@ -161,14 +163,9 @@ public:
virtual unsigned calculateIndirectAddress(unsigned RegIndex,
unsigned Channel) const = 0;
- /// \returns The register class to be used for storing values to an
- /// "Indirect Address" .
- virtual const TargetRegisterClass *getIndirectAddrStoreRegClass(
- unsigned SourceReg) const = 0;
-
- /// \returns The register class to be used for loading values from
- /// an "Indirect Address" .
- virtual const TargetRegisterClass *getIndirectAddrLoadRegClass() const = 0;
+ /// \returns The register class to be used for loading and storing values
+ /// from an "Indirect Address" .
+ virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;
/// \brief Build instruction(s) for an indirect register write.
///
@@ -186,18 +183,27 @@ public:
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const = 0;
- /// \returns the register class whose sub registers are the set of all
- /// possible registers that can be used for indirect addressing.
- virtual const TargetRegisterClass *getSuperIndirectRegClass() const = 0;
-
/// \brief Convert the AMDIL MachineInstr to a supported ISA
/// MachineInstr
virtual void convertToISA(MachineInstr & MI, MachineFunction &MF,
DebugLoc DL) const;
+ /// \brief Build a MOV instruction.
+ virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const = 0;
+
+ /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
+ /// equivalent opcode that writes \p Channels channels.
+ int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
+
};
+namespace AMDGPU {
+ int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
+} // End namespace AMDGPU
+
} // End llvm namespace
#define AMDGPU_FLAG_REGISTER_LOAD (UINT64_C(1) << 63)
diff --git a/lib/Target/R600/AMDGPUInstrInfo.td b/lib/Target/R600/AMDGPUInstrInfo.td
index b66ae87..fccede0 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/lib/Target/R600/AMDGPUInstrInfo.td
@@ -23,12 +23,6 @@ def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [
// AMDGPU DAG Nodes
//
-// out = ((a << 32) | b) >> c
-//
-// Can be used to optimize rotl:
-// rotl(a, b) = bitalign(a, a, 32 - b)
-def AMDGPUbitalign : SDNode<"AMDGPUISD::BITALIGN", AMDGPUDTIntTernaryOp>;
-
// This argument to this node is a dword address.
def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>;
@@ -71,8 +65,6 @@ def AMDGPUumin : SDNode<"AMDGPUISD::UMIN", SDTIntBinOp,
// e is rounding error
def AMDGPUurecip : SDNode<"AMDGPUISD::URECIP", SDTIntUnaryOp>;
-def fpow : SDNode<"ISD::FPOW", SDTFPBinOp>;
-
def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD",
SDTypeProfile<1, 2, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
[SDNPHasChain, SDNPMayLoad]>;
@@ -80,3 +72,17 @@ def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD",
def AMDGPUregister_store : SDNode<"AMDGPUISD::REGISTER_STORE",
SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
[SDNPHasChain, SDNPMayStore]>;
+
+// MSKOR instructions are atomic memory instructions used mainly for storing
+// 8-bit and 16-bit values. The definition is:
+//
+// MSKOR(dst, mask, src) MEM[dst] = ((MEM[dst] & ~mask) | src)
+//
+// src0: vec4(src, 0, 0, mask)
+// src1: dst - rat offset (aka pointer) in dwords
+def AMDGPUstore_mskor : SDNode<"AMDGPUISD::STORE_MSKOR",
+ SDTypeProfile<0, 2, []>,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
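As a scalar illustration of the definition above (with hypothetical values): storing the byte 0xAB into byte 1 of a dword uses mask = 0x0000FF00 and src = 0x0000AB00, so the other three bytes survive the read-modify-write:

    #include <cstdint>

    // Sketch of the MSKOR semantics from the comment above:
    //   MEM[dst] = (MEM[dst] & ~mask) | src
    void mskor(uint32_t &Mem, uint32_t Mask, uint32_t Src) {
      Mem = (Mem & ~Mask) | Src;
    }
    // e.g. mskor(M, 0x0000FF00, 0x0000AB00) stores 0xAB into byte 1 of M.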
+def AMDGPUround : SDNode<"ISD::FROUND",
+ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>>;
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index d2620b2..3c5375d 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -35,46 +35,75 @@ class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
}
def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
+def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
-def COND_EQ : PatLeaf <
+//===----------------------------------------------------------------------===//
+// PatLeafs for floating-point comparisons
+//===----------------------------------------------------------------------===//
+
+def COND_OEQ : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOEQ: case ISD::SETUEQ:
- case ISD::SETEQ: return true;}}}]
+ [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;
-def COND_NE : PatLeaf <
+def COND_OGT : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETONE: case ISD::SETUNE:
- case ISD::SETNE: return true;}}}]
+ [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;
-def COND_GT : PatLeaf <
+
+def COND_OGE : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOGT: case ISD::SETUGT:
- case ISD::SETGT: return true;}}}]
+ [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;
-def COND_GE : PatLeaf <
+def COND_OLT : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOGE: case ISD::SETUGE:
- case ISD::SETGE: return true;}}}]
+ [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;
-def COND_LT : PatLeaf <
+def COND_OLE : PatLeaf <
+ (cond),
+ [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
+>;
+
+def COND_UNE : PatLeaf <
+ (cond),
+ [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
+>;
+
+def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
+def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
+
+//===----------------------------------------------------------------------===//
+// PatLeafs for unsigned comparisons
+//===----------------------------------------------------------------------===//
+
+def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
+def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
+def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
+def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;
+
+//===----------------------------------------------------------------------===//
+// PatLeafs for signed comparisons
+//===----------------------------------------------------------------------===//
+
+def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
+def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
+def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
+def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;
+
+//===----------------------------------------------------------------------===//
+// PatLeafs for integer equality
+//===----------------------------------------------------------------------===//
+
+def COND_EQ : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOLT: case ISD::SETULT:
- case ISD::SETLT: return true;}}}]
+ [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;
-def COND_LE : PatLeaf <
+def COND_NE : PatLeaf <
(cond),
- [{switch(N->get()){{default: return false;
- case ISD::SETOLE: case ISD::SETULE:
- case ISD::SETLE: return true;}}}]
+ [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;
def COND_NULL : PatLeaf <
@@ -86,15 +115,131 @@ def COND_NULL : PatLeaf <
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
-def zextloadi8_global : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{
+def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ LoadSDNode *L = cast<LoadSDNode>(N);
+ return L->getExtensionType() == ISD::ZEXTLOAD ||
+ L->getExtensionType() == ISD::EXTLOAD;
+}]>;
+
+def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;
+def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def az_extloadi32_global : PatFrag<(ops node:$ptr),
+ (az_extloadi32 node:$ptr), [{
+ return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi32_constant : PatFrag<(ops node:$ptr),
+ (az_extloadi32 node:$ptr), [{
+ return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei8 node:$val, node:$ptr), [{
+ return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei16 node:$val, node:$ptr), [{
+ return isGlobalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def local_store : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei8 node:$val, node:$ptr), [{
+ return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
+ (truncstorei16 node:$val, node:$ptr), [{
+ return isLocalStore(dyn_cast<StoreSDNode>(N));
+}]>;
+
+def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return isLocalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def atomic_load_add_local : PatFrag<(ops node:$ptr, node:$value),
+ (atomic_load_add node:$ptr, node:$value), [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+def atomic_load_sub_local : PatFrag<(ops node:$ptr, node:$value),
+ (atomic_load_sub node:$ptr, node:$value), [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+def mskor_global : PatFrag<(ops node:$val, node:$ptr),
+ (AMDGPUstore_mskor node:$val, node:$ptr), [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+}]>;
+
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
-int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
+int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
}
def CONST : Constants;
@@ -108,6 +253,9 @@ def FP_ONE : PatLeaf <
[{return N->isExactlyValue(1.0);}]
>;
+def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
+def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;
+
let isCodeGenOnly = 1, isPseudo = 1 in {
let usesCustomInserter = 1 in {
@@ -137,6 +285,8 @@ class FNEG <RegisterClass rc> : AMDGPUShaderInst <
multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
ComplexPattern addrPat> {
+let UseNamedOperandTable = 1 in {
+
def RegisterLoad : AMDGPUShaderInst <
(outs dstClass:$dst),
(ins addrClass:$addr, i32imm:$chan),
@@ -155,6 +305,7 @@ multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
let isRegisterStore = 1;
}
}
+}
} // End isCodeGenOnly = 1, isPseudo = 1
@@ -186,61 +337,12 @@ class Insert_Element <ValueType elem_type, ValueType vec_type,
(INSERT_SUBREG $vec, $elem, sub_reg)
>;
-// Vector Build pattern
-class Vector1_Build <ValueType vecType, ValueType elemType,
- RegisterClass rc> : Pat <
- (vecType (build_vector elemType:$src)),
- (vecType (COPY_TO_REGCLASS $src, rc))
->;
-
-class Vector2_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$sub0, elemType:$sub1)),
- (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1)
->;
-
class Vector4_Build <ValueType vecType, ValueType elemType> : Pat <
(vecType (build_vector elemType:$x, elemType:$y, elemType:$z, elemType:$w)),
(INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
(vecType (IMPLICIT_DEF)), $x, sub0), $y, sub1), $z, sub2), $w, sub3)
>;
-class Vector8_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$sub0, elemType:$sub1,
- elemType:$sub2, elemType:$sub3,
- elemType:$sub4, elemType:$sub5,
- elemType:$sub6, elemType:$sub7)),
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1),
- $sub2, sub2), $sub3, sub3),
- $sub4, sub4), $sub5, sub5),
- $sub6, sub6), $sub7, sub7)
->;
-
-class Vector16_Build <ValueType vecType, ValueType elemType> : Pat <
- (vecType (build_vector elemType:$sub0, elemType:$sub1,
- elemType:$sub2, elemType:$sub3,
- elemType:$sub4, elemType:$sub5,
- elemType:$sub6, elemType:$sub7,
- elemType:$sub8, elemType:$sub9,
- elemType:$sub10, elemType:$sub11,
- elemType:$sub12, elemType:$sub13,
- elemType:$sub14, elemType:$sub15)),
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG (INSERT_SUBREG
- (vecType (IMPLICIT_DEF)), $sub0, sub0), $sub1, sub1),
- $sub2, sub2), $sub3, sub3),
- $sub4, sub4), $sub5, sub5),
- $sub6, sub6), $sub7, sub7),
- $sub8, sub8), $sub9, sub9),
- $sub10, sub10), $sub11, sub11),
- $sub12, sub12), $sub13, sub13),
- $sub14, sub14), $sub15, sub15)
->;
-
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
@@ -295,6 +397,22 @@ class BFEPattern <Instruction BFE> : Pat <
(BFE $x, $y, $z)
>;
+// rotr pattern
+class ROTRPattern <Instruction BIT_ALIGN> : Pat <
+ (rotr i32:$src0, i32:$src1),
+ (BIT_ALIGN $src0, $src0, $src1)
+>;
+
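This works because bitalign computes ((a:b) >> c) on the 64-bit concatenation of its first two sources (see the BITALIGN comment deleted above), and with a == b that is exactly a rotate right. A scalar sketch:

    #include <cstdint>

    // Sketch: rotr selected as BIT_ALIGN with both sources equal.
    uint32_t bitalign(uint32_t A, uint32_t B, uint32_t C) {
      C &= 31;
      return C ? (uint32_t)((((uint64_t)A << 32) | B) >> C) : B;
    }
    uint32_t rotr32(uint32_t X, uint32_t S) { return bitalign(X, X, S); }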
+// 24-bit arithmetic patterns
+def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
+
+/*
+class UMUL24Pattern <Instruction UMUL24> : Pat <
+ (mul U24:$x, U24:$y),
+ (UMUL24 $x, $y)
+>;
+*/
+
include "R600Instructions.td"
include "SIInstrInfo.td"
diff --git a/lib/Target/R600/AMDGPUIntrinsics.td b/lib/Target/R600/AMDGPUIntrinsics.td
index eecb25b..9f975bf 100644
--- a/lib/Target/R600/AMDGPUIntrinsics.td
+++ b/lib/Target/R600/AMDGPUIntrinsics.td
@@ -50,6 +50,8 @@ let TargetPrefix = "AMDGPU", isTarget = 1 in {
def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+ def int_AMDGPU_barrier_local : Intrinsic<[], [], []>;
}
let TargetPrefix = "TGSI", isTarget = 1 in {
diff --git a/lib/Target/R600/AMDGPUMCInstLower.cpp b/lib/Target/R600/AMDGPUMCInstLower.cpp
index 1dc1c65..0ed598e 100644
--- a/lib/Target/R600/AMDGPUMCInstLower.cpp
+++ b/lib/Target/R600/AMDGPUMCInstLower.cpp
@@ -15,14 +15,19 @@
#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
+#include "InstPrinter/AMDGPUInstPrinter.h"
#include "R600InstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
+#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
using namespace llvm;
@@ -69,15 +74,45 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MachineBasicBlock::const_instr_iterator I = MI;
++I;
while (I != MBB->end() && I->isInsideBundle()) {
- MCInst MCBundleInst;
- const MachineInstr *BundledInst = I;
- MCInstLowering.lower(BundledInst, MCBundleInst);
- OutStreamer.EmitInstruction(MCBundleInst);
+ EmitInstruction(I);
++I;
}
} else {
MCInst TmpInst;
MCInstLowering.lower(MI, TmpInst);
OutStreamer.EmitInstruction(TmpInst);
+
+ if (DisasmEnabled) {
+ // Disassemble instruction/operands to text.
+ DisasmLines.resize(DisasmLines.size() + 1);
+ std::string &DisasmLine = DisasmLines.back();
+ raw_string_ostream DisasmStream(DisasmLine);
+
+ AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(), *TM.getInstrInfo(),
+ *TM.getRegisterInfo());
+ InstPrinter.printInst(&TmpInst, DisasmStream, StringRef());
+
+ // Disassemble instruction/operands to hex representation.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallVector<char, 16> CodeBytes;
+ raw_svector_ostream CodeStream(CodeBytes);
+
+ MCObjectStreamer &ObjStreamer = (MCObjectStreamer &)OutStreamer;
+ MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
+ InstEmitter.EncodeInstruction(TmpInst, CodeStream, Fixups);
+ CodeStream.flush();
+
+ HexLines.resize(HexLines.size() + 1);
+ std::string &HexLine = HexLines.back();
+ raw_string_ostream HexStream(HexLine);
+
+ for (size_t i = 0; i < CodeBytes.size(); i += 4) {
+ unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
+ HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
+ }
+
+ DisasmStream.flush();
+ DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
+ }
}
}
diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/R600/AMDGPUMachineFunction.cpp
index 0461025..14171f4 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.cpp
+++ b/lib/Target/R600/AMDGPUMachineFunction.cpp
@@ -2,14 +2,17 @@
#include "AMDGPU.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
+using namespace llvm;
-namespace llvm {
+static const char *const ShaderTypeAttribute = "ShaderType";
-const char *AMDGPUMachineFunction::ShaderTypeAttribute = "ShaderType";
+// Pin the vtable to this file.
+void AMDGPUMachineFunction::anchor() {}
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
MachineFunctionInfo() {
ShaderType = ShaderType::COMPUTE;
+ LDSSize = 0;
AttributeSet Set = MF.getFunction()->getAttributes();
Attribute A = Set.getAttribute(AttributeSet::FunctionIndex,
ShaderTypeAttribute);
@@ -20,5 +23,3 @@ AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
llvm_unreachable("Can't parse shader type!");
}
}
-
-}
diff --git a/lib/Target/R600/AMDGPUMachineFunction.h b/lib/Target/R600/AMDGPUMachineFunction.h
index 21c8c51..fea0b39 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.h
+++ b/lib/Target/R600/AMDGPUMachineFunction.h
@@ -14,15 +14,20 @@
#define AMDGPUMACHINEFUNCTION_H
#include "llvm/CodeGen/MachineFunction.h"
+#include <map>
namespace llvm {
class AMDGPUMachineFunction : public MachineFunctionInfo {
-private:
- static const char *ShaderTypeAttribute;
+ virtual void anchor();
public:
AMDGPUMachineFunction(const MachineFunction &MF);
unsigned ShaderType;
+ /// A map to keep track of local memory objects and their offsets within
+ /// the local memory space.
+ std::map<const GlobalValue *, unsigned> LocalMemoryObjects;
+ /// Number of bytes in the LDS that are being used.
+ unsigned LDSSize;
};
}
diff --git a/lib/Target/R600/AMDGPURegisterInfo.cpp b/lib/Target/R600/AMDGPURegisterInfo.cpp
index fe994d2..47617a7 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.cpp
+++ b/lib/Target/R600/AMDGPURegisterInfo.cpp
@@ -17,11 +17,9 @@
using namespace llvm;
-AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm,
- const TargetInstrInfo &tii)
+AMDGPURegisterInfo::AMDGPURegisterInfo(TargetMachine &tm)
: AMDGPUGenRegisterInfo(0),
- TM(tm),
- TII(tii)
+ TM(tm)
{ }
//===----------------------------------------------------------------------===//
@@ -48,27 +46,21 @@ unsigned AMDGPURegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return 0;
}
+unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const {
+ static const unsigned SubRegs[] = {
+ AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,
+ AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9,
+ AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14,
+ AMDGPU::sub15
+ };
+
+ assert(Channel < array_lengthof(SubRegs));
+ return SubRegs[Channel];
+}
+
unsigned AMDGPURegisterInfo::getIndirectSubReg(unsigned IndirectIndex) const {
- switch(IndirectIndex) {
- case 0: return AMDGPU::sub0;
- case 1: return AMDGPU::sub1;
- case 2: return AMDGPU::sub2;
- case 3: return AMDGPU::sub3;
- case 4: return AMDGPU::sub4;
- case 5: return AMDGPU::sub5;
- case 6: return AMDGPU::sub6;
- case 7: return AMDGPU::sub7;
- case 8: return AMDGPU::sub8;
- case 9: return AMDGPU::sub9;
- case 10: return AMDGPU::sub10;
- case 11: return AMDGPU::sub11;
- case 12: return AMDGPU::sub12;
- case 13: return AMDGPU::sub13;
- case 14: return AMDGPU::sub14;
- case 15: return AMDGPU::sub15;
- default: llvm_unreachable("indirect index out of range");
- }
+ return getSubRegFromChannel(IndirectIndex);
}
#define GET_REGINFO_TARGET_DESC
diff --git a/lib/Target/R600/AMDGPURegisterInfo.h b/lib/Target/R600/AMDGPURegisterInfo.h
index 1fc88e7..688e1a0 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.h
+++ b/lib/Target/R600/AMDGPURegisterInfo.h
@@ -30,10 +30,9 @@ class TargetInstrInfo;
struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
TargetMachine &TM;
- const TargetInstrInfo &TII;
static const uint16_t CalleeSavedReg;
- AMDGPURegisterInfo(TargetMachine &tm, const TargetInstrInfo &tii);
+ AMDGPURegisterInfo(TargetMachine &tm);
virtual BitVector getReservedRegs(const MachineFunction &MF) const {
assert(!"Unimplemented"); return BitVector();
@@ -51,6 +50,14 @@ struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
assert(!"Unimplemented"); return NULL;
}
+ virtual unsigned getHWRegIndex(unsigned Reg) const {
+ assert(!"Unimplemented"); return 0;
+ }
+
+ /// \returns the sub reg enum value for the given \p Channel
+ /// (e.g. getSubRegFromChannel(0) -> AMDGPU::sub0)
+ unsigned getSubRegFromChannel(unsigned Channel) const;
+
const uint16_t* getCalleeSavedRegs(const MachineFunction *MF) const;
void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
diff --git a/lib/Target/R600/AMDGPURegisterInfo.td b/lib/Target/R600/AMDGPURegisterInfo.td
index b5aca03..835a146 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.td
+++ b/lib/Target/R600/AMDGPURegisterInfo.td
@@ -14,7 +14,8 @@
let Namespace = "AMDGPU" in {
foreach Index = 0-15 in {
- def sub#Index : SubRegIndex;
+ // Indices are used in a variety of ways here, so don't set a size/offset.
+ def sub#Index : SubRegIndex<-1, -1>;
}
def INDIRECT_BASE_ADDR : Register <"INDIRECT_BASE_ADDR">;
diff --git a/lib/Target/R600/AMDGPUStructurizeCFG.cpp b/lib/Target/R600/AMDGPUStructurizeCFG.cpp
deleted file mode 100644
index dea43b8..0000000
--- a/lib/Target/R600/AMDGPUStructurizeCFG.cpp
+++ /dev/null
@@ -1,896 +0,0 @@
-//===-- AMDGPUStructurizeCFG.cpp - ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// The pass implemented in this file transforms the programs control flow
-/// graph into a form that's suitable for code generation on hardware that
-/// implements control flow by execution masking. This currently includes all
-/// AMD GPUs but may as well be useful for other types of hardware.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/ADT/SCCIterator.h"
-#include "llvm/ADT/MapVector.h"
-#include "llvm/Analysis/RegionInfo.h"
-#include "llvm/Analysis/RegionIterator.h"
-#include "llvm/Analysis/RegionPass.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Support/PatternMatch.h"
-
-using namespace llvm;
-using namespace llvm::PatternMatch;
-
-namespace {
-
-// Definition of the complex types used in this pass.
-
-typedef std::pair<BasicBlock *, Value *> BBValuePair;
-
-typedef SmallVector<RegionNode*, 8> RNVector;
-typedef SmallVector<BasicBlock*, 8> BBVector;
-typedef SmallVector<BranchInst*, 8> BranchVector;
-typedef SmallVector<BBValuePair, 2> BBValueVector;
-
-typedef SmallPtrSet<BasicBlock *, 8> BBSet;
-
-typedef MapVector<PHINode *, BBValueVector> PhiMap;
-typedef MapVector<BasicBlock *, BBVector> BB2BBVecMap;
-
-typedef DenseMap<DomTreeNode *, unsigned> DTN2UnsignedMap;
-typedef DenseMap<BasicBlock *, PhiMap> BBPhiMap;
-typedef DenseMap<BasicBlock *, Value *> BBPredicates;
-typedef DenseMap<BasicBlock *, BBPredicates> PredMap;
-typedef DenseMap<BasicBlock *, BasicBlock*> BB2BBMap;
-
-// The name for newly created blocks.
-
-static const char *FlowBlockName = "Flow";
-
-/// @brief Find the nearest common dominator for multiple BasicBlocks
-///
-/// Helper class for AMDGPUStructurizeCFG
-/// TODO: Maybe move into common code
-class NearestCommonDominator {
-
- DominatorTree *DT;
-
- DTN2UnsignedMap IndexMap;
-
- BasicBlock *Result;
- unsigned ResultIndex;
- bool ExplicitMentioned;
-
-public:
- /// \brief Start a new query
- NearestCommonDominator(DominatorTree *DomTree) {
- DT = DomTree;
- Result = 0;
- }
-
- /// \brief Add BB to the resulting dominator
- void addBlock(BasicBlock *BB, bool Remember = true) {
-
- DomTreeNode *Node = DT->getNode(BB);
-
- if (Result == 0) {
- unsigned Numbering = 0;
- for (;Node;Node = Node->getIDom())
- IndexMap[Node] = ++Numbering;
- Result = BB;
- ResultIndex = 1;
- ExplicitMentioned = Remember;
- return;
- }
-
- for (;Node;Node = Node->getIDom())
- if (IndexMap.count(Node))
- break;
- else
- IndexMap[Node] = 0;
-
- assert(Node && "Dominator tree invalid!");
-
- unsigned Numbering = IndexMap[Node];
- if (Numbering > ResultIndex) {
- Result = Node->getBlock();
- ResultIndex = Numbering;
- ExplicitMentioned = Remember && (Result == BB);
- } else if (Numbering == ResultIndex) {
- ExplicitMentioned |= Remember;
- }
- }
-
- /// \brief Is "Result" one of the BBs added with "Remember" = True?
- bool wasResultExplicitMentioned() {
- return ExplicitMentioned;
- }
-
- /// \brief Get the query result
- BasicBlock *getResult() {
- return Result;
- }
-};
-
-/// @brief Transforms the control flow graph on one single entry/exit region
-/// at a time.
-///
-/// After the transform all "If"/"Then"/"Else" style control flow looks like
-/// this:
-///
-/// \verbatim
-/// 1
-/// ||
-/// | |
-/// 2 |
-/// | /
-/// |/
-/// 3
-/// || Where:
-/// | | 1 = "If" block, calculates the condition
-/// 4 | 2 = "Then" subregion, runs if the condition is true
-/// | / 3 = "Flow" blocks, newly inserted flow blocks, rejoins the flow
-/// |/ 4 = "Else" optional subregion, runs if the condition is false
-/// 5 5 = "End" block, also rejoins the control flow
-/// \endverbatim
-///
-/// Control flow is expressed as a branch where the true exit goes into the
-/// "Then"/"Else" region, while the false exit skips the region.
-/// The condition for the optional "Else" region is expressed as a PHI node.
-/// The incoming values of the PHI node are true for the "If" edge and false
-/// for the "Then" edge.
-///
-/// In addition, even complicated loops look like this:
-///
-/// \verbatim
-/// 1
-/// ||
-/// | |
-/// 2 ^ Where:
-/// | / 1 = "Entry" block
-/// |/ 2 = "Loop" optional subregion, with all exits at "Flow" block
-/// 3 3 = "Flow" block, with back edge to entry block
-/// |
-/// \endverbatim
-///
-/// The back edge of the "Flow" block is always on the false side of the branch,
-/// while the true side continues the general flow. So the loop condition
-/// consists of a network of PHI nodes where the true incoming values express
-/// breaks and the false values express continue states.
-class AMDGPUStructurizeCFG : public RegionPass {
-
- static char ID;
-
- Type *Boolean;
- ConstantInt *BoolTrue;
- ConstantInt *BoolFalse;
- UndefValue *BoolUndef;
-
- Function *Func;
- Region *ParentRegion;
-
- DominatorTree *DT;
-
- RNVector Order;
- BBSet Visited;
-
- BBPhiMap DeletedPhis;
- BB2BBVecMap AddedPhis;
-
- PredMap Predicates;
- BranchVector Conditions;
-
- BB2BBMap Loops;
- PredMap LoopPreds;
- BranchVector LoopConds;
-
- RegionNode *PrevNode;
-
- void orderNodes();
-
- void analyzeLoops(RegionNode *N);
-
- Value *invert(Value *Condition);
-
- Value *buildCondition(BranchInst *Term, unsigned Idx, bool Invert);
-
- void gatherPredicates(RegionNode *N);
-
- void collectInfos();
-
- void insertConditions(bool Loops);
-
- void delPhiValues(BasicBlock *From, BasicBlock *To);
-
- void addPhiValues(BasicBlock *From, BasicBlock *To);
-
- void setPhiValues();
-
- void killTerminator(BasicBlock *BB);
-
- void changeExit(RegionNode *Node, BasicBlock *NewExit,
- bool IncludeDominator);
-
- BasicBlock *getNextFlow(BasicBlock *Dominator);
-
- BasicBlock *needPrefix(bool NeedEmpty);
-
- BasicBlock *needPostfix(BasicBlock *Flow, bool ExitUseAllowed);
-
- void setPrevNode(BasicBlock *BB);
-
- bool dominatesPredicates(BasicBlock *BB, RegionNode *Node);
-
- bool isPredictableTrue(RegionNode *Node);
-
- void wireFlow(bool ExitUseAllowed, BasicBlock *LoopEnd);
-
- void handleLoops(bool ExitUseAllowed, BasicBlock *LoopEnd);
-
- void createFlow();
-
- void rebuildSSA();
-
-public:
- AMDGPUStructurizeCFG():
- RegionPass(ID) {
-
- initializeRegionInfoPass(*PassRegistry::getPassRegistry());
- }
-
- using Pass::doInitialization;
- virtual bool doInitialization(Region *R, RGPassManager &RGM);
-
- virtual bool runOnRegion(Region *R, RGPassManager &RGM);
-
- virtual const char *getPassName() const {
- return "AMDGPU simplify control flow";
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
-
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
- RegionPass::getAnalysisUsage(AU);
- }
-
-};
-
-} // end anonymous namespace
-
-char AMDGPUStructurizeCFG::ID = 0;
-
-/// \brief Initialize the types and constants used in the pass
-bool AMDGPUStructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
- LLVMContext &Context = R->getEntry()->getContext();
-
- Boolean = Type::getInt1Ty(Context);
- BoolTrue = ConstantInt::getTrue(Context);
- BoolFalse = ConstantInt::getFalse(Context);
- BoolUndef = UndefValue::get(Boolean);
-
- return false;
-}
-
-/// \brief Build up the general order of nodes
-void AMDGPUStructurizeCFG::orderNodes() {
- scc_iterator<Region *> I = scc_begin(ParentRegion),
- E = scc_end(ParentRegion);
- for (Order.clear(); I != E; ++I) {
- std::vector<RegionNode *> &Nodes = *I;
- Order.append(Nodes.begin(), Nodes.end());
- }
-}
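Note that scc_iterator yields SCCs in reverse topological order, so appending them here leaves Order reverse-topological; that is why collectInfos() below walks it with rbegin() and wireFlow() pops from the back. A hedged illustration of the same iteration over an ordinary function CFG (3.3-era headers assumed):

#include "llvm/ADT/SCCIterator.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CFG.h"  // GraphTraits<Function *> lives here in this tree

using namespace llvm;

static void visitSCCs(Function &F) {
  for (scc_iterator<Function *> I = scc_begin(&F), E = scc_end(&F);
       I != E; ++I) {
    const std::vector<BasicBlock *> &SCC = *I;  // one strongly connected comp.
    // A loop shows up as an SCC with more than one block (or a self edge);
    // straight-line blocks arrive as singleton SCCs, latest-first.
    (void)SCC;
  }
}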
-
-/// \brief Determine the end of the loops
-void AMDGPUStructurizeCFG::analyzeLoops(RegionNode *N) {
-
- if (N->isSubRegion()) {
- // Test for exit as back edge
- BasicBlock *Exit = N->getNodeAs<Region>()->getExit();
- if (Visited.count(Exit))
- Loops[Exit] = N->getEntry();
-
- } else {
-    // Test for successors as back edges
- BasicBlock *BB = N->getNodeAs<BasicBlock>();
- BranchInst *Term = cast<BranchInst>(BB->getTerminator());
-
- for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
- BasicBlock *Succ = Term->getSuccessor(i);
-
- if (Visited.count(Succ))
- Loops[Succ] = BB;
- }
- }
-}
-
-/// \brief Invert the given condition
-Value *AMDGPUStructurizeCFG::invert(Value *Condition) {
-
- // First: Check if it's a constant
- if (Condition == BoolTrue)
- return BoolFalse;
-
- if (Condition == BoolFalse)
- return BoolTrue;
-
- if (Condition == BoolUndef)
- return BoolUndef;
-
- // Second: If the condition is already inverted, return the original value
- if (match(Condition, m_Not(m_Value(Condition))))
- return Condition;
-
- // Third: Check all the users for an invert
- BasicBlock *Parent = cast<Instruction>(Condition)->getParent();
- for (Value::use_iterator I = Condition->use_begin(),
- E = Condition->use_end(); I != E; ++I) {
-
- Instruction *User = dyn_cast<Instruction>(*I);
- if (!User || User->getParent() != Parent)
- continue;
-
- if (match(*I, m_Not(m_Specific(Condition))))
- return *I;
- }
-
- // Last option: Create a new instruction
- return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator());
-}
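The second step above leans on PatternMatch binding order: m_Not(m_Value(Condition)) matches LLVM's canonical not (xor X, -1) and rebinds Condition to the inner operand before returning it. A small sketch of the idiom in isolation (illustrative helper, not code from this pass):

#include "llvm/IR/Value.h"
#include "llvm/Support/PatternMatch.h"  // llvm/IR/PatternMatch.h in later trees

using namespace llvm;
using namespace llvm::PatternMatch;

// Returns the operand of a single `not` (xor X, -1), or V itself otherwise.
static Value *stripOneNot(Value *V) {
  Value *Inner;
  if (match(V, m_Not(m_Value(Inner))))
    return Inner;
  return V;
}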
-
-/// \brief Build the condition for one edge
-Value *AMDGPUStructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx,
- bool Invert) {
- Value *Cond = Invert ? BoolFalse : BoolTrue;
- if (Term->isConditional()) {
- Cond = Term->getCondition();
-
- if (Idx != Invert)
- Cond = invert(Cond);
- }
- return Cond;
-}
-
-/// \brief Analyze the predecessors of each block and build up predicates
-void AMDGPUStructurizeCFG::gatherPredicates(RegionNode *N) {
-
- RegionInfo *RI = ParentRegion->getRegionInfo();
- BasicBlock *BB = N->getEntry();
- BBPredicates &Pred = Predicates[BB];
- BBPredicates &LPred = LoopPreds[BB];
-
- for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- PI != PE; ++PI) {
-
- // Ignore it if it's a branch from outside into our region entry
- if (!ParentRegion->contains(*PI))
- continue;
-
- Region *R = RI->getRegionFor(*PI);
- if (R == ParentRegion) {
-
- // It's a top level block in our region
- BranchInst *Term = cast<BranchInst>((*PI)->getTerminator());
- for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
- BasicBlock *Succ = Term->getSuccessor(i);
- if (Succ != BB)
- continue;
-
- if (Visited.count(*PI)) {
- // Normal forward edge
- if (Term->isConditional()) {
- // Try to treat it like an ELSE block
- BasicBlock *Other = Term->getSuccessor(!i);
- if (Visited.count(Other) && !Loops.count(Other) &&
- !Pred.count(Other) && !Pred.count(*PI)) {
-
- Pred[Other] = BoolFalse;
- Pred[*PI] = BoolTrue;
- continue;
- }
- }
- Pred[*PI] = buildCondition(Term, i, false);
-
- } else {
- // Back edge
- LPred[*PI] = buildCondition(Term, i, true);
- }
- }
-
- } else {
-
- // It's an exit from a sub region
- while(R->getParent() != ParentRegion)
- R = R->getParent();
-
- // Edge from inside a subregion to its entry, ignore it
- if (R == N)
- continue;
-
- BasicBlock *Entry = R->getEntry();
- if (Visited.count(Entry))
- Pred[Entry] = BoolTrue;
- else
- LPred[Entry] = BoolFalse;
- }
- }
-}
-
-/// \brief Collect various loop and predicate infos
-void AMDGPUStructurizeCFG::collectInfos() {
-
- // Reset predicate
- Predicates.clear();
-
- // and loop infos
- Loops.clear();
- LoopPreds.clear();
-
- // Reset the visited nodes
- Visited.clear();
-
- for (RNVector::reverse_iterator OI = Order.rbegin(), OE = Order.rend();
- OI != OE; ++OI) {
-
- // Analyze all the conditions leading to a node
- gatherPredicates(*OI);
-
- // Remember that we've seen this node
- Visited.insert((*OI)->getEntry());
-
- // Find the last back edges
- analyzeLoops(*OI);
- }
-}
-
-/// \brief Insert the missing branch conditions
-void AMDGPUStructurizeCFG::insertConditions(bool Loops) {
- BranchVector &Conds = Loops ? LoopConds : Conditions;
- Value *Default = Loops ? BoolTrue : BoolFalse;
- SSAUpdater PhiInserter;
-
- for (BranchVector::iterator I = Conds.begin(),
- E = Conds.end(); I != E; ++I) {
-
- BranchInst *Term = *I;
- assert(Term->isConditional());
-
- BasicBlock *Parent = Term->getParent();
- BasicBlock *SuccTrue = Term->getSuccessor(0);
- BasicBlock *SuccFalse = Term->getSuccessor(1);
-
- PhiInserter.Initialize(Boolean, "");
- PhiInserter.AddAvailableValue(&Func->getEntryBlock(), Default);
- PhiInserter.AddAvailableValue(Loops ? SuccFalse : Parent, Default);
-
- BBPredicates &Preds = Loops ? LoopPreds[SuccFalse] : Predicates[SuccTrue];
-
- NearestCommonDominator Dominator(DT);
- Dominator.addBlock(Parent, false);
-
- Value *ParentValue = 0;
- for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end();
- PI != PE; ++PI) {
-
- if (PI->first == Parent) {
- ParentValue = PI->second;
- break;
- }
- PhiInserter.AddAvailableValue(PI->first, PI->second);
- Dominator.addBlock(PI->first);
- }
-
- if (ParentValue) {
- Term->setCondition(ParentValue);
- } else {
- if (!Dominator.wasResultExplicitMentioned())
- PhiInserter.AddAvailableValue(Dominator.getResult(), Default);
-
- Term->setCondition(PhiInserter.GetValueInMiddleOfBlock(Parent));
- }
- }
-}
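A compressed sketch of the SSAUpdater protocol this function relies on (assumed blocks and values, not the pass's real state): seed one value per defining block, then ask for the value live at the branch and let the updater materialize any PHIs in between.

#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

using namespace llvm;

static Value *predicateIn(BasicBlock *Where, Type *BoolTy,
                          BasicBlock *Entry, Value *Default,
                          BasicBlock *DefBB, Value *DefVal) {
  SSAUpdater Inserter;
  Inserter.Initialize(BoolTy, "pred");
  Inserter.AddAvailableValue(Entry, Default);  // fallback on all other paths
  Inserter.AddAvailableValue(DefBB, DefVal);   // the one interesting predicate
  return Inserter.GetValueInMiddleOfBlock(Where);
}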
-
-/// \brief Remove all PHI values coming from "From" into "To" and remember
-/// them in DeletedPhis
-void AMDGPUStructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) {
- PhiMap &Map = DeletedPhis[To];
- for (BasicBlock::iterator I = To->begin(), E = To->end();
- I != E && isa<PHINode>(*I);) {
-
- PHINode &Phi = cast<PHINode>(*I++);
- while (Phi.getBasicBlockIndex(From) != -1) {
- Value *Deleted = Phi.removeIncomingValue(From, false);
- Map[&Phi].push_back(std::make_pair(From, Deleted));
- }
- }
-}
-
-/// \brief Add a dummy PHI value as soon as we know the new predecessor
-void AMDGPUStructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) {
- for (BasicBlock::iterator I = To->begin(), E = To->end();
- I != E && isa<PHINode>(*I);) {
-
- PHINode &Phi = cast<PHINode>(*I++);
- Value *Undef = UndefValue::get(Phi.getType());
- Phi.addIncoming(Undef, From);
- }
- AddedPhis[To].push_back(From);
-}
-
-/// \brief Add the real PHI value as soon as everything is set up
-void AMDGPUStructurizeCFG::setPhiValues() {
-
- SSAUpdater Updater;
- for (BB2BBVecMap::iterator AI = AddedPhis.begin(), AE = AddedPhis.end();
- AI != AE; ++AI) {
-
- BasicBlock *To = AI->first;
- BBVector &From = AI->second;
-
- if (!DeletedPhis.count(To))
- continue;
-
- PhiMap &Map = DeletedPhis[To];
- for (PhiMap::iterator PI = Map.begin(), PE = Map.end();
- PI != PE; ++PI) {
-
- PHINode *Phi = PI->first;
- Value *Undef = UndefValue::get(Phi->getType());
- Updater.Initialize(Phi->getType(), "");
- Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
- Updater.AddAvailableValue(To, Undef);
-
- NearestCommonDominator Dominator(DT);
- Dominator.addBlock(To, false);
- for (BBValueVector::iterator VI = PI->second.begin(),
- VE = PI->second.end(); VI != VE; ++VI) {
-
- Updater.AddAvailableValue(VI->first, VI->second);
- Dominator.addBlock(VI->first);
- }
-
- if (!Dominator.wasResultExplicitMentioned())
- Updater.AddAvailableValue(Dominator.getResult(), Undef);
-
- for (BBVector::iterator FI = From.begin(), FE = From.end();
- FI != FE; ++FI) {
-
- int Idx = Phi->getBasicBlockIndex(*FI);
- assert(Idx != -1);
- Phi->setIncomingValue(Idx, Updater.GetValueAtEndOfBlock(*FI));
- }
- }
-
- DeletedPhis.erase(To);
- }
- assert(DeletedPhis.empty());
-}
-
-/// \brief Remove phi values from all successors and then remove the terminator.
-void AMDGPUStructurizeCFG::killTerminator(BasicBlock *BB) {
- TerminatorInst *Term = BB->getTerminator();
- if (!Term)
- return;
-
- for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
- SI != SE; ++SI) {
-
- delPhiValues(BB, *SI);
- }
-
- Term->eraseFromParent();
-}
-
-/// \brief Let node exit(s) point to NewExit
-void AMDGPUStructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
- bool IncludeDominator) {
-
- if (Node->isSubRegion()) {
- Region *SubRegion = Node->getNodeAs<Region>();
- BasicBlock *OldExit = SubRegion->getExit();
- BasicBlock *Dominator = 0;
-
- // Find all the edges from the sub region to the exit
- for (pred_iterator I = pred_begin(OldExit), E = pred_end(OldExit);
- I != E;) {
-
- BasicBlock *BB = *I++;
- if (!SubRegion->contains(BB))
- continue;
-
- // Modify the edges to point to the new exit
- delPhiValues(BB, OldExit);
- BB->getTerminator()->replaceUsesOfWith(OldExit, NewExit);
- addPhiValues(BB, NewExit);
-
- // Find the new dominator (if requested)
- if (IncludeDominator) {
- if (!Dominator)
- Dominator = BB;
- else
- Dominator = DT->findNearestCommonDominator(Dominator, BB);
- }
- }
-
- // Change the dominator (if requested)
- if (Dominator)
- DT->changeImmediateDominator(NewExit, Dominator);
-
- // Update the region info
- SubRegion->replaceExit(NewExit);
-
- } else {
- BasicBlock *BB = Node->getNodeAs<BasicBlock>();
- killTerminator(BB);
- BranchInst::Create(NewExit, BB);
- addPhiValues(BB, NewExit);
- if (IncludeDominator)
- DT->changeImmediateDominator(NewExit, BB);
- }
-}
-
-/// \brief Create a new flow node and update dominator tree and region info
-BasicBlock *AMDGPUStructurizeCFG::getNextFlow(BasicBlock *Dominator) {
- LLVMContext &Context = Func->getContext();
- BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() :
- Order.back()->getEntry();
- BasicBlock *Flow = BasicBlock::Create(Context, FlowBlockName,
- Func, Insert);
- DT->addNewBlock(Flow, Dominator);
- ParentRegion->getRegionInfo()->setRegionFor(Flow, ParentRegion);
- return Flow;
-}
-
-/// \brief Create a new flow node or reuse the previous one
-BasicBlock *AMDGPUStructurizeCFG::needPrefix(bool NeedEmpty) {
-
- BasicBlock *Entry = PrevNode->getEntry();
-
- if (!PrevNode->isSubRegion()) {
- killTerminator(Entry);
- if (!NeedEmpty || Entry->getFirstInsertionPt() == Entry->end())
- return Entry;
-
- }
-
- // create a new flow node
- BasicBlock *Flow = getNextFlow(Entry);
-
- // and wire it up
- changeExit(PrevNode, Flow, true);
- PrevNode = ParentRegion->getBBNode(Flow);
- return Flow;
-}
-
-/// \brief Returns the region exit if possible, otherwise just a new flow node
-BasicBlock *AMDGPUStructurizeCFG::needPostfix(BasicBlock *Flow,
- bool ExitUseAllowed) {
-
- if (Order.empty() && ExitUseAllowed) {
- BasicBlock *Exit = ParentRegion->getExit();
- DT->changeImmediateDominator(Exit, Flow);
- addPhiValues(Flow, Exit);
- return Exit;
- }
- return getNextFlow(Flow);
-}
-
-/// \brief Set the previous node
-void AMDGPUStructurizeCFG::setPrevNode(BasicBlock *BB) {
- PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : 0;
-}
-
-/// \brief Does BB dominate all the predicates of Node?
-bool AMDGPUStructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) {
- BBPredicates &Preds = Predicates[Node->getEntry()];
- for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end();
- PI != PE; ++PI) {
-
- if (!DT->dominates(BB, PI->first))
- return false;
- }
- return true;
-}
-
-/// \brief Can we predict that this node will always be called?
-bool AMDGPUStructurizeCFG::isPredictableTrue(RegionNode *Node) {
-
- BBPredicates &Preds = Predicates[Node->getEntry()];
- bool Dominated = false;
-
-  // Region entry is always true
- if (PrevNode == 0)
- return true;
-
- for (BBPredicates::iterator I = Preds.begin(), E = Preds.end();
- I != E; ++I) {
-
- if (I->second != BoolTrue)
- return false;
-
- if (!Dominated && DT->dominates(I->first, PrevNode->getEntry()))
- Dominated = true;
- }
-
- // TODO: The dominator check is too strict
- return Dominated;
-}
-
-/// Take one node from the order vector and wire it up
-void AMDGPUStructurizeCFG::wireFlow(bool ExitUseAllowed,
- BasicBlock *LoopEnd) {
-
- RegionNode *Node = Order.pop_back_val();
- Visited.insert(Node->getEntry());
-
- if (isPredictableTrue(Node)) {
- // Just a linear flow
- if (PrevNode) {
- changeExit(PrevNode, Node->getEntry(), true);
- }
- PrevNode = Node;
-
- } else {
- // Insert extra prefix node (or reuse last one)
- BasicBlock *Flow = needPrefix(false);
-
- // Insert extra postfix node (or use exit instead)
- BasicBlock *Entry = Node->getEntry();
- BasicBlock *Next = needPostfix(Flow, ExitUseAllowed);
-
- // let it point to entry and next block
- Conditions.push_back(BranchInst::Create(Entry, Next, BoolUndef, Flow));
- addPhiValues(Flow, Entry);
- DT->changeImmediateDominator(Entry, Flow);
-
- PrevNode = Node;
- while (!Order.empty() && !Visited.count(LoopEnd) &&
- dominatesPredicates(Entry, Order.back())) {
- handleLoops(false, LoopEnd);
- }
-
- changeExit(PrevNode, Next, false);
- setPrevNode(Next);
- }
-}
-
-void AMDGPUStructurizeCFG::handleLoops(bool ExitUseAllowed,
- BasicBlock *LoopEnd) {
- RegionNode *Node = Order.back();
- BasicBlock *LoopStart = Node->getEntry();
-
- if (!Loops.count(LoopStart)) {
- wireFlow(ExitUseAllowed, LoopEnd);
- return;
- }
-
- if (!isPredictableTrue(Node))
- LoopStart = needPrefix(true);
-
- LoopEnd = Loops[Node->getEntry()];
- wireFlow(false, LoopEnd);
- while (!Visited.count(LoopEnd)) {
- handleLoops(false, LoopEnd);
- }
-
- // Create an extra loop end node
- LoopEnd = needPrefix(false);
- BasicBlock *Next = needPostfix(LoopEnd, ExitUseAllowed);
- LoopConds.push_back(BranchInst::Create(Next, LoopStart,
- BoolUndef, LoopEnd));
- addPhiValues(LoopEnd, LoopStart);
- setPrevNode(Next);
-}
-
-/// After this function, control flow looks like it should, but
-/// branches and PHI nodes only have undefined conditions.
-void AMDGPUStructurizeCFG::createFlow() {
-
- BasicBlock *Exit = ParentRegion->getExit();
- bool EntryDominatesExit = DT->dominates(ParentRegion->getEntry(), Exit);
-
- DeletedPhis.clear();
- AddedPhis.clear();
- Conditions.clear();
- LoopConds.clear();
-
- PrevNode = 0;
- Visited.clear();
-
- while (!Order.empty()) {
- handleLoops(EntryDominatesExit, 0);
- }
-
- if (PrevNode)
- changeExit(PrevNode, Exit, EntryDominatesExit);
- else
- assert(EntryDominatesExit);
-}
-
-/// Handle a rare case where the disintegrated nodes' instructions
-/// no longer dominate all their uses. Not sure if this is really necessary.
-void AMDGPUStructurizeCFG::rebuildSSA() {
- SSAUpdater Updater;
- for (Region::block_iterator I = ParentRegion->block_begin(),
- E = ParentRegion->block_end();
- I != E; ++I) {
-
- BasicBlock *BB = *I;
- for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
- II != IE; ++II) {
-
- bool Initialized = false;
- for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) {
-
- Next = I->getNext();
-
- Instruction *User = cast<Instruction>(I->getUser());
- if (User->getParent() == BB) {
- continue;
-
- } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
- if (UserPN->getIncomingBlock(*I) == BB)
- continue;
- }
-
- if (DT->dominates(II, User))
- continue;
-
- if (!Initialized) {
- Value *Undef = UndefValue::get(II->getType());
- Updater.Initialize(II->getType(), "");
- Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
- Updater.AddAvailableValue(BB, II);
- Initialized = true;
- }
- Updater.RewriteUseAfterInsertions(*I);
- }
- }
- }
-}
-
-/// \brief Run the transformation for each region found
-bool AMDGPUStructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
- if (R->isTopLevelRegion())
- return false;
-
- Func = R->getEntry()->getParent();
- ParentRegion = R;
-
- DT = &getAnalysis<DominatorTree>();
-
- orderNodes();
- collectInfos();
- createFlow();
- insertConditions(false);
- insertConditions(true);
- setPhiValues();
- rebuildSSA();
-
- // Cleanup
- Order.clear();
- Visited.clear();
- DeletedPhis.clear();
- AddedPhis.clear();
- Predicates.clear();
- Conditions.clear();
- Loops.clear();
- LoopPreds.clear();
- LoopConds.clear();
-
- return true;
-}
-
-/// \brief Create the pass
-Pass *llvm::createAMDGPUStructurizeCFGPass() {
- return new AMDGPUStructurizeCFG();
-}
diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/R600/AMDGPUSubtarget.cpp
index a7e1d7b..061793a 100644
--- a/lib/Target/R600/AMDGPUSubtarget.cpp
+++ b/lib/Target/R600/AMDGPUSubtarget.cpp
@@ -25,8 +25,6 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
AMDGPUGenSubtargetInfo(TT, CPU, FS), DumpCode(false) {
InstrItins = getInstrItineraryForCPU(CPU);
- memset(CapsOverride, 0, sizeof(*CapsOverride)
- * AMDGPUDeviceInfo::MaxNumberCapabilities);
// Default card
StringRef GPU = CPU;
Is64bit = false;
@@ -34,21 +32,16 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS) :
DefaultSize[1] = 1;
DefaultSize[2] = 1;
HasVertexCache = false;
+ TexVTXClauseSize = 0;
+ Gen = AMDGPUSubtarget::R600;
+ FP64 = false;
+ CaymanISA = false;
+ EnableIRStructurizer = true;
+ EnableIfCvt = true;
ParseSubtargetFeatures(GPU, FS);
DevName = GPU;
- Device = AMDGPUDeviceInfo::getDeviceFromName(DevName, this, Is64bit);
}
-AMDGPUSubtarget::~AMDGPUSubtarget() {
- delete Device;
-}
-
-bool
-AMDGPUSubtarget::isOverride(AMDGPUDeviceInfo::Caps caps) const {
- assert(caps < AMDGPUDeviceInfo::MaxNumberCapabilities &&
- "Caps index is out of bounds!");
- return CapsOverride[caps];
-}
bool
AMDGPUSubtarget::is64bit() const {
return Is64bit;
@@ -57,6 +50,30 @@ bool
AMDGPUSubtarget::hasVertexCache() const {
return HasVertexCache;
}
+short
+AMDGPUSubtarget::getTexVTXClauseSize() const {
+ return TexVTXClauseSize;
+}
+enum AMDGPUSubtarget::Generation
+AMDGPUSubtarget::getGeneration() const {
+ return Gen;
+}
+bool
+AMDGPUSubtarget::hasHWFP64() const {
+ return FP64;
+}
+bool
+AMDGPUSubtarget::hasCaymanISA() const {
+ return CaymanISA;
+}
+bool
+AMDGPUSubtarget::IsIRStructurizerEnabled() const {
+ return EnableIRStructurizer;
+}
+bool
+AMDGPUSubtarget::isIfCvtEnabled() const {
+ return EnableIfCvt;
+}
bool
AMDGPUSubtarget::isTargetELF() const {
return false;
@@ -72,21 +89,32 @@ AMDGPUSubtarget::getDefaultSize(uint32_t dim) const {
std::string
AMDGPUSubtarget::getDataLayout() const {
- if (!Device) {
- return std::string("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16"
- "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:32:32"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64"
- "-v96:128:128-v128:128:128-v192:256:256-v256:256:256"
- "-v512:512:512-v1024:1024:1024-v2048:2048:2048-a0:0:64");
- }
- return Device->getDataLayout();
+ std::string DataLayout = std::string(
+ "e"
+ "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
+ "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
+ "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+ "-n32:64"
+ );
+
+ if (hasHWFP64()) {
+ DataLayout.append("-f64:64:64");
+ }
+
+ if (is64bit()) {
+ DataLayout.append("-p:64:64:64");
+ } else {
+ DataLayout.append("-p:32:32:32");
+ }
+
+ if (Gen >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ DataLayout.append("-p3:32:32:32");
+ }
+
+ return DataLayout;
}
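Worked example (illustrative, assuming the usual SI CPU name): for a 32-bit Southern Islands part with hardware doubles, the branches above append, in order, "-f64:64:64", "-p:32:32:32" and "-p3:32:32:32" (32-bit local pointers) to the shared base string.

#include <cassert>
#include "AMDGPUSubtarget.h"

void dataLayoutExample() {
  AMDGPUSubtarget ST("r600--", /*CPU=*/"tahiti", /*FS=*/"");
  std::string DL = ST.getDataLayout();
  // Southern Islands and newer carry a size for addrspace(3) pointers:
  assert(DL.find("-p3:32:32:32") != std::string::npos);
}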
std::string
AMDGPUSubtarget::getDeviceName() const {
return DevName;
}
-const AMDGPUDevice *
-AMDGPUSubtarget::device() const {
- return Device;
-}
diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/R600/AMDGPUSubtarget.h
index b6501a4..4288d27 100644
--- a/lib/Target/R600/AMDGPUSubtarget.h
+++ b/lib/Target/R600/AMDGPUSubtarget.h
@@ -14,7 +14,7 @@
#ifndef AMDGPUSUBTARGET_H
#define AMDGPUSUBTARGET_H
-#include "AMDILDevice.h"
+#include "AMDGPU.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -27,9 +27,17 @@
namespace llvm {
class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
+public:
+ enum Generation {
+ R600 = 0,
+ R700,
+ EVERGREEN,
+ NORTHERN_ISLANDS,
+ SOUTHERN_ISLANDS,
+ SEA_ISLANDS
+ };
+
private:
- bool CapsOverride[AMDGPUDeviceInfo::MaxNumberCapabilities];
- const AMDGPUDevice *Device;
size_t DefaultSize[3];
std::string DevName;
bool Is64bit;
@@ -37,23 +45,36 @@ private:
bool DumpCode;
bool R600ALUInst;
bool HasVertexCache;
+ short TexVTXClauseSize;
+ enum Generation Gen;
+ bool FP64;
+ bool CaymanISA;
+ bool EnableIRStructurizer;
+ bool EnableIfCvt;
InstrItineraryData InstrItins;
public:
AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS);
- virtual ~AMDGPUSubtarget();
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
virtual void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool isOverride(AMDGPUDeviceInfo::Caps) const;
bool is64bit() const;
bool hasVertexCache() const;
+ short getTexVTXClauseSize() const;
+ enum Generation getGeneration() const;
+ bool hasHWFP64() const;
+ bool hasCaymanISA() const;
+ bool IsIRStructurizerEnabled() const;
+ bool isIfCvtEnabled() const;
+
+ virtual bool enableMachineScheduler() const {
+ return getGeneration() <= NORTHERN_ISLANDS;
+ }
// Helper functions to simplify if statements
bool isTargetELF() const;
- const AMDGPUDevice* device() const;
std::string getDataLayout() const;
std::string getDeviceName() const;
virtual size_t getDefaultSize(uint32_t dim) const;
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp
index 31fbf32..bc4f5d7 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -33,6 +33,7 @@
#include "llvm/Transforms/Scalar.h"
#include <llvm/CodeGen/Passes.h>
+
using namespace llvm;
extern "C" void LLVMInitializeR600Target() {
@@ -59,17 +60,19 @@ AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
Subtarget(TT, CPU, FS),
Layout(Subtarget.getDataLayout()),
FrameLowering(TargetFrameLowering::StackGrowsUp,
- Subtarget.device()->getStackAlignment(), 0),
+ 64 * 16 // Maximum stack alignment (long16)
+ , 0),
IntrinsicInfo(this),
InstrItins(&Subtarget.getInstrItineraryData()) {
// TLInfo uses InstrInfo so it must be initialized after.
- if (Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- InstrInfo = new R600InstrInfo(*this);
- TLInfo = new R600TargetLowering(*this);
+ if (Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ InstrInfo.reset(new R600InstrInfo(*this));
+ TLInfo.reset(new R600TargetLowering(*this));
} else {
- InstrInfo = new SIInstrInfo(*this);
- TLInfo = new SITargetLowering(*this);
+ InstrInfo.reset(new SIInstrInfo(*this));
+ TLInfo.reset(new SITargetLowering(*this));
}
+ initAsmInfo();
}
AMDGPUTargetMachine::~AMDGPUTargetMachine() {
@@ -79,18 +82,20 @@ namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- enablePass(&MachineSchedulerID);
- MachineSchedRegistry::setDefault(createR600MachineScheduler);
- }
- }
+ : TargetPassConfig(TM, PM) {}
AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
return getTM<AMDGPUTargetMachine>();
}
+ virtual ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const {
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ return createR600MachineScheduler(C);
+ return 0;
+ }
+
virtual bool addPreISel();
virtual bool addInstSelector();
virtual bool addPreRegAlloc();
@@ -104,53 +109,76 @@ TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
return new AMDGPUPassConfig(this, PM);
}
+//===----------------------------------------------------------------------===//
+// AMDGPU Analysis Pass Setup
+//===----------------------------------------------------------------------===//
+
+void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
+  // First add the target-independent BasicTTI pass, then our AMDGPU pass. This
+  // allows the AMDGPU pass to delegate to the target-independent layer when
+  // appropriate.
+ PM.add(createBasicTargetTransformInfoPass(this));
+ PM.add(createAMDGPUTargetTransformInfoPass(this));
+}
+
bool
AMDGPUPassConfig::addPreISel() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
- addPass(createAMDGPUStructurizeCFGPass());
+ addPass(createFlattenCFGPass());
+ if (ST.IsIRStructurizerEnabled())
+ addPass(createStructurizeCFGPass());
+ if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ addPass(createSinkingPass());
+ addPass(createSITypeRewriter());
addPass(createSIAnnotateControlFlowPass());
+ } else {
+ addPass(createR600TextureIntrinsicsReplacer());
}
return false;
}
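The IR-level half of the pipeline now runs the generic StructurizeCFG pass (the in-tree successor of the AMDGPUStructurizeCFG pass deleted above) behind an opt-out subtarget flag. A hypothetical standalone reproduction of the SI-generation ordering, using the entry points from llvm/Transforms/Scalar.h (the SI-specific passes are the target's own and omitted here):

#include "llvm/PassManager.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

static void buildPreISelIR(FunctionPassManager &FPM) {
  FPM.add(createFlattenCFGPass());      // merge if-regions first
  FPM.add(createStructurizeCFGPass());  // generic CFG structurizer
  FPM.add(createSinkingPass());         // sink computations toward uses
}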
bool AMDGPUPassConfig::addInstSelector() {
addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
-
- const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
-  // The callbacks this pass uses are not implemented yet on SI.
- addPass(createAMDGPUIndirectAddressingPass(*TM));
- }
return false;
}
bool AMDGPUPassConfig::addPreRegAlloc() {
addPass(createAMDGPUConvertToISAPass(*TM));
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
+
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+ addPass(createR600VectorRegMerger(*TM));
+ } else {
+ addPass(createSIFixSGPRCopiesPass(*TM));
+ }
return false;
}
bool AMDGPUPassConfig::addPostRegAlloc() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
+ if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createSIInsertWaits(*TM));
}
return false;
}
bool AMDGPUPassConfig::addPreSched2() {
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- addPass(&IfConverterID);
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ addPass(createR600EmitClauseMarkers(*TM));
+ if (ST.isIfCvtEnabled())
+ addPass(&IfConverterID);
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+ addPass(createR600ClauseMergePass(*TM));
return false;
}
bool AMDGPUPassConfig::addPreEmitPass() {
const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- addPass(createAMDGPUCFGPreparationPass(*TM));
+ if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createAMDGPUCFGStructurizerPass(*TM));
- addPass(createR600EmitClauseMarkers(*TM));
addPass(createR600ExpandSpecialInstrsPass(*TM));
addPass(&FinalizeMachineBundlesID);
addPass(createR600Packetizer(*TM));
@@ -161,4 +189,3 @@ bool AMDGPUPassConfig::addPreEmitPass() {
return false;
}
-
diff --git a/lib/Target/R600/AMDGPUTargetMachine.h b/lib/Target/R600/AMDGPUTargetMachine.h
index 2afe787..f942614 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.h
+++ b/lib/Target/R600/AMDGPUTargetMachine.h
@@ -25,44 +25,45 @@
namespace llvm {
-MCAsmInfo* createMCAsmInfo(const Target &T, StringRef TT);
-
class AMDGPUTargetMachine : public LLVMTargetMachine {
AMDGPUSubtarget Subtarget;
const DataLayout Layout;
AMDGPUFrameLowering FrameLowering;
AMDGPUIntrinsicInfo IntrinsicInfo;
- const AMDGPUInstrInfo * InstrInfo;
- AMDGPUTargetLowering * TLInfo;
- const InstrItineraryData* InstrItins;
+ OwningPtr<AMDGPUInstrInfo> InstrInfo;
+ OwningPtr<AMDGPUTargetLowering> TLInfo;
+ const InstrItineraryData *InstrItins;
public:
- AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS,
- StringRef CPU,
- TargetOptions Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL);
- ~AMDGPUTargetMachine();
- virtual const AMDGPUFrameLowering* getFrameLowering() const {
- return &FrameLowering;
- }
- virtual const AMDGPUIntrinsicInfo* getIntrinsicInfo() const {
- return &IntrinsicInfo;
- }
- virtual const AMDGPUInstrInfo *getInstrInfo() const {return InstrInfo;}
- virtual const AMDGPUSubtarget *getSubtargetImpl() const {return &Subtarget; }
- virtual const AMDGPURegisterInfo *getRegisterInfo() const {
- return &InstrInfo->getRegisterInfo();
- }
- virtual AMDGPUTargetLowering * getTargetLowering() const {
- return TLInfo;
- }
- virtual const InstrItineraryData* getInstrItineraryData() const {
- return InstrItins;
- }
- virtual const DataLayout* getDataLayout() const { return &Layout; }
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
+ ~AMDGPUTargetMachine();
+ virtual const AMDGPUFrameLowering *getFrameLowering() const {
+ return &FrameLowering;
+ }
+ virtual const AMDGPUIntrinsicInfo *getIntrinsicInfo() const {
+ return &IntrinsicInfo;
+ }
+ virtual const AMDGPUInstrInfo *getInstrInfo() const {
+ return InstrInfo.get();
+ }
+ virtual const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; }
+ virtual const AMDGPURegisterInfo *getRegisterInfo() const {
+ return &InstrInfo->getRegisterInfo();
+ }
+ virtual AMDGPUTargetLowering *getTargetLowering() const {
+ return TLInfo.get();
+ }
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return InstrItins;
+ }
+ virtual const DataLayout *getDataLayout() const { return &Layout; }
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ /// \brief Register R600 analysis passes with a pass manager.
+ virtual void addAnalysisPasses(PassManagerBase &PM);
};
} // End namespace llvm
diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
new file mode 100644
index 0000000..8db319c
--- /dev/null
+++ b/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
@@ -0,0 +1,90 @@
+//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file implements a TargetTransformInfo analysis pass specific to the
+// AMDGPU target machine. It uses the target's detailed information to provide
+// more precise answers to certain TTI queries, while letting the target
+// independent and default TTI implementations handle the rest.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "AMDGPUtti"
+#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/CostTable.h"
+using namespace llvm;
+
+// Declare the pass initialization routine locally as target-specific passes
+// don't have a target-wide initialization entry point, and so we rely on the
+// pass constructor initialization.
+namespace llvm {
+void initializeAMDGPUTTIPass(PassRegistry &);
+}
+
+namespace {
+
+class AMDGPUTTI : public ImmutablePass, public TargetTransformInfo {
+ const AMDGPUTargetMachine *TM;
+ const AMDGPUSubtarget *ST;
+ const AMDGPUTargetLowering *TLI;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+
+public:
+ AMDGPUTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
+ llvm_unreachable("This pass cannot be directly constructed");
+ }
+
+ AMDGPUTTI(const AMDGPUTargetMachine *TM)
+ : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
+ TLI(TM->getTargetLowering()) {
+ initializeAMDGPUTTIPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void initializePass() { pushTTIStack(this); }
+
+ virtual void finalizePass() { popTTIStack(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ TargetTransformInfo::getAnalysisUsage(AU);
+ }
+
+ /// Pass identification.
+ static char ID;
+
+ /// Provide necessary pointer adjustments for the two base classes.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &TargetTransformInfo::ID)
+ return (TargetTransformInfo *)this;
+ return this;
+ }
+
+ virtual bool hasBranchDivergence() const;
+
+ /// @}
+};
+
+} // end anonymous namespace
+
+INITIALIZE_AG_PASS(AMDGPUTTI, TargetTransformInfo, "AMDGPUtti",
+ "AMDGPU Target Transform Info", true, true, false)
+char AMDGPUTTI::ID = 0;
+
+ImmutablePass *
+llvm::createAMDGPUTargetTransformInfoPass(const AMDGPUTargetMachine *TM) {
+ return new AMDGPUTTI(TM);
+}
+
+bool AMDGPUTTI::hasBranchDivergence() const { return true; }
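Consumer side, for context: once addAnalysisPasses() has registered the pass, any transform can query the hook. A hedged sketch (hypothetical pass body, not code from this patch):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Pass.h"

using namespace llvm;

// Inside some FunctionPass that declared TargetTransformInfo in its
// getAnalysisUsage(): be conservative about speculating branches on
// targets where the threads of a wavefront may diverge.
static bool shouldAvoidSpeculation(Pass &P) {
  const TargetTransformInfo &TTI = P.getAnalysis<TargetTransformInfo>();
  return TTI.hasBranchDivergence();  // true for every AMDGPU subtarget here
}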
diff --git a/lib/Target/R600/AMDIL.h b/lib/Target/R600/AMDIL.h
deleted file mode 100644
index 39ab664..0000000
--- a/lib/Target/R600/AMDIL.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- AMDIL.h - Top-level interface for AMDIL representation --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// This file contains the entry points for global functions defined in the LLVM
-/// AMDGPU back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef AMDIL_H
-#define AMDIL_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/Target/TargetMachine.h"
-
-#define ARENA_SEGMENT_RESERVED_UAVS 12
-#define DEFAULT_ARENA_UAV_ID 8
-#define DEFAULT_RAW_UAV_ID 7
-#define GLOBAL_RETURN_RAW_UAV_ID 11
-#define HW_MAX_NUM_CB 8
-#define MAX_NUM_UNIQUE_UAVS 8
-#define OPENCL_MAX_NUM_ATOMIC_COUNTERS 8
-#define OPENCL_MAX_READ_IMAGES 128
-#define OPENCL_MAX_WRITE_IMAGES 8
-#define OPENCL_MAX_SAMPLERS 16
-
-// The following ID values can never be zero, as zero is the ID that is
-// used to assert against.
-#define DEFAULT_LDS_ID 1
-#define DEFAULT_GDS_ID 1
-#define DEFAULT_SCRATCH_ID 1
-#define DEFAULT_VEC_SLOTS 8
-
-#define OCL_DEVICE_RV710 0x0001
-#define OCL_DEVICE_RV730 0x0002
-#define OCL_DEVICE_RV770 0x0004
-#define OCL_DEVICE_CEDAR 0x0008
-#define OCL_DEVICE_REDWOOD 0x0010
-#define OCL_DEVICE_JUNIPER 0x0020
-#define OCL_DEVICE_CYPRESS 0x0040
-#define OCL_DEVICE_CAICOS 0x0080
-#define OCL_DEVICE_TURKS 0x0100
-#define OCL_DEVICE_BARTS 0x0200
-#define OCL_DEVICE_CAYMAN 0x0400
-#define OCL_DEVICE_ALL 0x3FFF
-
-/// The number of function IDs that are reserved for
-/// internal compiler usage.
-const unsigned int RESERVED_FUNCS = 1024;
-
-namespace llvm {
-class AMDGPUInstrPrinter;
-class FunctionPass;
-class MCAsmInfo;
-class raw_ostream;
-class Target;
-class TargetMachine;
-
-// Instruction selection passes.
-FunctionPass*
- createAMDGPUISelDag(TargetMachine &TM);
-FunctionPass*
- createAMDGPUPeepholeOpt(TargetMachine &TM);
-
-// Pre emit passes.
-FunctionPass*
- createAMDGPUCFGPreparationPass(TargetMachine &TM);
-FunctionPass*
- createAMDGPUCFGStructurizerPass(TargetMachine &TM);
-
-extern Target TheAMDGPUTarget;
-} // end namespace llvm;
-
-// Include device information enumerations
-#include "AMDILDeviceInfo.h"
-
-namespace llvm {
-/// OpenCL uses address spaces to differentiate between
-/// various memory regions on the hardware. On the CPU
-/// all of the address spaces point to the same memory;
-/// however, on the GPU, each address space points to
-/// a separate piece of memory that is distinct from other
-/// memory locations.
-namespace AMDGPUAS {
-enum AddressSpaces {
- PRIVATE_ADDRESS = 0, ///< Address space for private memory.
- GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0).
- CONSTANT_ADDRESS = 2, ///< Address space for constant memory
- LOCAL_ADDRESS = 3, ///< Address space for local memory.
- REGION_ADDRESS = 4, ///< Address space for region memory.
- ADDRESS_NONE = 5, ///< Address space for unknown memory.
-  PARAM_D_ADDRESS   = 6, ///< Address space for direct addressable parameter memory (CONST0)
-  PARAM_I_ADDRESS   = 7, ///< Address space for indirect addressable parameter memory (VTX1)
- CONSTANT_BUFFER_0 = 8,
- CONSTANT_BUFFER_1 = 9,
- CONSTANT_BUFFER_2 = 10,
- CONSTANT_BUFFER_3 = 11,
- CONSTANT_BUFFER_4 = 12,
- CONSTANT_BUFFER_5 = 13,
- CONSTANT_BUFFER_6 = 14,
- CONSTANT_BUFFER_7 = 15,
- CONSTANT_BUFFER_8 = 16,
- CONSTANT_BUFFER_9 = 17,
- CONSTANT_BUFFER_10 = 18,
- CONSTANT_BUFFER_11 = 19,
- CONSTANT_BUFFER_12 = 20,
- CONSTANT_BUFFER_13 = 21,
- CONSTANT_BUFFER_14 = 22,
- CONSTANT_BUFFER_15 = 23,
- LAST_ADDRESS = 24
-};
-
-} // namespace AMDGPUAS
-
-} // end namespace llvm
-#endif // AMDIL_H
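Worth noting for reviewers: the AMDGPUAS enumeration deleted here is not dropped from the backend; AMDGPU.h (also touched by this patch) presumably becomes its new home. Typical C++ usage, for reference (hypothetical helper):

#include "AMDGPU.h"  // assumed new home of the AMDGPUAS enumeration
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Build a float* in LDS: addrspace(3) corresponds to LOCAL_ADDRESS above.
static PointerType *localFloatPtr(LLVMContext &Ctx) {
  return PointerType::get(Type::getFloatTy(Ctx), AMDGPUAS::LOCAL_ADDRESS);
}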
diff --git a/lib/Target/R600/AMDIL7XXDevice.cpp b/lib/Target/R600/AMDIL7XXDevice.cpp
deleted file mode 100644
index ea6ac34..0000000
--- a/lib/Target/R600/AMDIL7XXDevice.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-//===-- AMDIL7XXDevice.cpp - Device Info for 7XX GPUs ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDIL7XXDevice.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILDevice.h"
-
-using namespace llvm;
-
-AMDGPU7XXDevice::AMDGPU7XXDevice(AMDGPUSubtarget *ST) : AMDGPUDevice(ST) {
- setCaps();
- std::string name = mSTM->getDeviceName();
- if (name == "rv710") {
- DeviceFlag = OCL_DEVICE_RV710;
- } else if (name == "rv730") {
- DeviceFlag = OCL_DEVICE_RV730;
- } else {
- DeviceFlag = OCL_DEVICE_RV770;
- }
-}
-
-AMDGPU7XXDevice::~AMDGPU7XXDevice() {
-}
-
-void AMDGPU7XXDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::LocalMem);
-}
-
-size_t AMDGPU7XXDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_700;
- }
- return 0;
-}
-
-size_t AMDGPU7XXDevice::getWavefrontSize() const {
- return AMDGPUDevice::HalfWavefrontSize;
-}
-
-uint32_t AMDGPU7XXDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD4XXX;
-}
-
-uint32_t AMDGPU7XXDevice::getResourceID(uint32_t DeviceID) const {
- switch (DeviceID) {
- default:
- assert(0 && "ID type passed in is unknown!");
- break;
- case GLOBAL_ID:
- case CONSTANT_ID:
- case RAW_UAV_ID:
- case ARENA_UAV_ID:
- break;
- case LDS_ID:
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return DEFAULT_LDS_ID;
- }
- break;
- case SCRATCH_ID:
- if (usesHardware(AMDGPUDeviceInfo::PrivateMem)) {
- return DEFAULT_SCRATCH_ID;
- }
- break;
- case GDS_ID:
- assert(0 && "GDS UAV ID is not supported on this chip");
- if (usesHardware(AMDGPUDeviceInfo::RegionMem)) {
- return DEFAULT_GDS_ID;
- }
- break;
- };
-
- return 0;
-}
-
-uint32_t AMDGPU7XXDevice::getMaxNumUAVs() const {
- return 1;
-}
-
-AMDGPU770Device::AMDGPU770Device(AMDGPUSubtarget *ST): AMDGPU7XXDevice(ST) {
- setCaps();
-}
-
-AMDGPU770Device::~AMDGPU770Device() {
-}
-
-void AMDGPU770Device::setCaps() {
- if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
- mSWBits.set(AMDGPUDeviceInfo::FMA);
- mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
- }
- mSWBits.set(AMDGPUDeviceInfo::BarrierDetect);
- mHWBits.reset(AMDGPUDeviceInfo::LongOps);
- mSWBits.set(AMDGPUDeviceInfo::LongOps);
- mSWBits.set(AMDGPUDeviceInfo::LocalMem);
-}
-
-size_t AMDGPU770Device::getWavefrontSize() const {
- return AMDGPUDevice::WavefrontSize;
-}
-
-AMDGPU710Device::AMDGPU710Device(AMDGPUSubtarget *ST) : AMDGPU7XXDevice(ST) {
-}
-
-AMDGPU710Device::~AMDGPU710Device() {
-}
-
-size_t AMDGPU710Device::getWavefrontSize() const {
- return AMDGPUDevice::QuarterWavefrontSize;
-}
diff --git a/lib/Target/R600/AMDIL7XXDevice.h b/lib/Target/R600/AMDIL7XXDevice.h
deleted file mode 100644
index 1cf4ca4..0000000
--- a/lib/Target/R600/AMDIL7XXDevice.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//==-- AMDIL7XXDevice.h - Define 7XX Device for AMDIL ----------*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===----------------------------------------------------------------------===//
-#ifndef AMDIL7XXDEVICEIMPL_H
-#define AMDIL7XXDEVICEIMPL_H
-#include "AMDILDevice.h"
-
-namespace llvm {
-class AMDGPUSubtarget;
-
-//===----------------------------------------------------------------------===//
-// 7XX generation of devices and their respective sub classes
-//===----------------------------------------------------------------------===//
-
-/// \brief The AMDGPU7XXDevice class represents the generic 7XX device.
-///
-/// All 7XX devices are derived from this class. The AMDGPU7XX device will only
-/// support the minimal features that are required to be considered OpenCL 1.0
-/// compliant and nothing more.
-class AMDGPU7XXDevice : public AMDGPUDevice {
-public:
- AMDGPU7XXDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPU7XXDevice();
- virtual size_t getMaxLDSSize() const;
- virtual size_t getWavefrontSize() const;
- virtual uint32_t getGeneration() const;
- virtual uint32_t getResourceID(uint32_t DeviceID) const;
- virtual uint32_t getMaxNumUAVs() const;
-
-protected:
- virtual void setCaps();
-};
-
-/// \brief The AMDGPU770Device class represents the RV770 chip and its
-/// derivative cards.
-///
-/// The difference between this device and the base class is that this device
-/// adds support for double precision and has a larger wavefront size.
-class AMDGPU770Device : public AMDGPU7XXDevice {
-public:
- AMDGPU770Device(AMDGPUSubtarget *ST);
- virtual ~AMDGPU770Device();
- virtual size_t getWavefrontSize() const;
-private:
- virtual void setCaps();
-};
-
-/// \brief The AMDGPU710Device class derives from the 7XX base class.
-///
-/// This class is a smaller derivative, so we need to overload some of the
-/// functions in order to correctly specify this information.
-class AMDGPU710Device : public AMDGPU7XXDevice {
-public:
- AMDGPU710Device(AMDGPUSubtarget *ST);
- virtual ~AMDGPU710Device();
- virtual size_t getWavefrontSize() const;
-};
-
-} // namespace llvm
-#endif // AMDIL7XXDEVICEIMPL_H
diff --git a/lib/Target/R600/AMDILBase.td b/lib/Target/R600/AMDILBase.td
index e221110..5dcd478 100644
--- a/lib/Target/R600/AMDILBase.td
+++ b/lib/Target/R600/AMDILBase.td
@@ -16,70 +16,6 @@ def ALU_NULL : FuncUnit;
def NullALU : InstrItinClass;
//===----------------------------------------------------------------------===//
-// AMDIL Subtarget features.
-//===----------------------------------------------------------------------===//
-def FeatureFP64 : SubtargetFeature<"fp64",
- "CapsOverride[AMDGPUDeviceInfo::DoubleOps]",
- "true",
- "Enable 64bit double precision operations">;
-def FeatureByteAddress : SubtargetFeature<"byte_addressable_store",
- "CapsOverride[AMDGPUDeviceInfo::ByteStores]",
- "true",
- "Enable byte addressable stores">;
-def FeatureBarrierDetect : SubtargetFeature<"barrier_detect",
- "CapsOverride[AMDGPUDeviceInfo::BarrierDetect]",
- "true",
- "Enable duplicate barrier detection(HD5XXX or later).">;
-def FeatureImages : SubtargetFeature<"images",
- "CapsOverride[AMDGPUDeviceInfo::Images]",
- "true",
- "Enable image functions">;
-def FeatureMultiUAV : SubtargetFeature<"multi_uav",
- "CapsOverride[AMDGPUDeviceInfo::MultiUAV]",
- "true",
- "Generate multiple UAV code(HD5XXX family or later)">;
-def FeatureMacroDB : SubtargetFeature<"macrodb",
- "CapsOverride[AMDGPUDeviceInfo::MacroDB]",
- "true",
- "Use internal macrodb, instead of macrodb in driver">;
-def FeatureNoAlias : SubtargetFeature<"noalias",
- "CapsOverride[AMDGPUDeviceInfo::NoAlias]",
- "true",
- "assert that all kernel argument pointers are not aliased">;
-def FeatureNoInline : SubtargetFeature<"no-inline",
- "CapsOverride[AMDGPUDeviceInfo::NoInline]",
- "true",
- "specify whether to not inline functions">;
-
-def Feature64BitPtr : SubtargetFeature<"64BitPtr",
- "Is64bit",
- "false",
- "Specify if 64bit addressing should be used.">;
-
-def Feature32on64BitPtr : SubtargetFeature<"64on32BitPtr",
- "Is32on64bit",
- "false",
- "Specify if 64bit sized pointers with 32bit addressing should be used.">;
-def FeatureDebug : SubtargetFeature<"debug",
- "CapsOverride[AMDGPUDeviceInfo::Debug]",
- "true",
- "Debug mode is enabled, so disable hardware accelerated address spaces.">;
-def FeatureDumpCode : SubtargetFeature <"DumpCode",
- "DumpCode",
- "true",
- "Dump MachineInstrs in the CodeEmitter">;
-
-def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
- "R600ALUInst",
- "false",
- "Older version of ALU instructions encoding.">;
-
-def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
- "HasVertexCache",
- "true",
- "Specify use of dedicated vertex cache.">;
-
-//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp
index b0cd0f9..507570f 100644
--- a/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/lib/Target/R600/AMDILCFGStructurizer.cpp
@@ -8,14 +8,17 @@
/// \file
//==-----------------------------------------------------------------------===//
-#define DEBUGME 0
#define DEBUG_TYPE "structcfg"
+#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
-#include "AMDIL.h"
+#include "R600InstrInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -28,9 +31,12 @@
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+#define DEFAULT_VEC_SLOTS 8
+
// TODO: move-begin.
//===----------------------------------------------------------------------===//
@@ -43,12 +49,8 @@ STATISTIC(numSerialPatternMatch, "CFGStructurizer number of serial pattern "
"matched");
STATISTIC(numIfPatternMatch, "CFGStructurizer number of if pattern "
"matched");
-STATISTIC(numLoopbreakPatternMatch, "CFGStructurizer number of loop-break "
- "pattern matched");
STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue "
"pattern matched");
-STATISTIC(numLoopPatternMatch, "CFGStructurizer number of loop pattern "
- "matched");
STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks");
STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
@@ -57,39 +59,29 @@ STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
// Miscellaneous utility for CFGStructurizer.
//
//===----------------------------------------------------------------------===//
-namespace llvmCFGStruct {
+namespace {
#define SHOWNEWINSTR(i) \
- if (DEBUGME) errs() << "New instr: " << *i << "\n"
+ DEBUG(dbgs() << "New instr: " << *i << "\n");
#define SHOWNEWBLK(b, msg) \
-if (DEBUGME) { \
- errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
- errs() << "\n"; \
-}
+DEBUG( \
+ dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+ dbgs() << "\n"; \
+);
#define SHOWBLK_DETAIL(b, msg) \
-if (DEBUGME) { \
+DEBUG( \
if (b) { \
- errs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
- b->print(errs()); \
- errs() << "\n"; \
+ dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
+ b->print(dbgs()); \
+ dbgs() << "\n"; \
} \
-}
+);
#define INVALIDSCCNUM -1
-#define INVALIDREGNUM 0
-
-template<class LoopinfoT>
-void PrintLoopinfo(const LoopinfoT &LoopInfo, llvm::raw_ostream &OS) {
- for (typename LoopinfoT::iterator iter = LoopInfo.begin(),
- iterEnd = LoopInfo.end();
- iter != iterEnd; ++iter) {
- (*iter)->print(OS, 0);
- }
-}
template<class NodeT>
-void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
+void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
size_t sz = Src.size();
for (size_t i = 0; i < sz/2; ++i) {
NodeT *t = Src[i];
@@ -98,7 +90,7 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
}
}
-} //end namespace llvmCFGStruct
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
@@ -106,43 +98,17 @@ void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
//
//===----------------------------------------------------------------------===//
-namespace llvmCFGStruct {
-template<class PassT>
-struct CFGStructTraits {
-};
-template <class InstrT>
-class BlockInformation {
-public:
- bool isRetired;
- int sccNum;
- //SmallVector<InstrT*, DEFAULT_VEC_SLOTS> succInstr;
- //Instructions defining the corresponding successor.
- BlockInformation() : isRetired(false), sccNum(INVALIDSCCNUM) {}
-};
+namespace {
-template <class BlockT, class InstrT, class RegiT>
-class LandInformation {
+class BlockInformation {
public:
- BlockT *landBlk;
- std::set<RegiT> breakInitRegs; //Registers that need to "reg = 0", before
- //WHILELOOP(thisloop) init before entering
- //thisloop.
- std::set<RegiT> contInitRegs; //Registers that need to "reg = 0", after
- //WHILELOOP(thisloop) init after entering
- //thisloop.
- std::set<RegiT> endbranchInitRegs; //Init before entering this loop, at loop
- //land block, branch cond on this reg.
- std::set<RegiT> breakOnRegs; //registers that need to "if (reg) break
- //endif" after ENDLOOP(thisloop) break
- //outerLoopOf(thisLoop).
- std::set<RegiT> contOnRegs; //registers that need to "if (reg) continue
- //endif" after ENDLOOP(thisloop) continue on
- //outerLoopOf(thisLoop).
- LandInformation() : landBlk(NULL) {}
+ bool IsRetired;
+ int SccNum;
+ BlockInformation() : IsRetired(false), SccNum(INVALIDSCCNUM) {}
};
-} //end of namespace llvmCFGStruct
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
@@ -150,1026 +116,1213 @@ public:
//
//===----------------------------------------------------------------------===//
-namespace llvmCFGStruct {
-// bixia TODO: port it to BasicBlock, not just MachineBasicBlock.
-template<class PassT>
-class CFGStructurizer {
+namespace {
+class AMDGPUCFGStructurizer : public MachineFunctionPass {
public:
- typedef enum {
+ typedef SmallVector<MachineBasicBlock *, 32> MBBVector;
+ typedef std::map<MachineBasicBlock *, BlockInformation *> MBBInfoMap;
+ typedef std::map<MachineLoop *, MachineBasicBlock *> LoopLandInfoMap;
+
+ enum PathToKind {
Not_SinglePath = 0,
SinglePath_InPath = 1,
SinglePath_NotInPath = 2
- } PathToKind;
+ };
-public:
- typedef typename PassT::InstructionType InstrT;
- typedef typename PassT::FunctionType FuncT;
- typedef typename PassT::DominatortreeType DomTreeT;
- typedef typename PassT::PostDominatortreeType PostDomTreeT;
- typedef typename PassT::DomTreeNodeType DomTreeNodeT;
- typedef typename PassT::LoopinfoType LoopInfoT;
-
- typedef GraphTraits<FuncT *> FuncGTraits;
- //typedef FuncGTraits::nodes_iterator BlockIterator;
- typedef typename FuncT::iterator BlockIterator;
-
- typedef typename FuncGTraits::NodeType BlockT;
- typedef GraphTraits<BlockT *> BlockGTraits;
- typedef GraphTraits<Inverse<BlockT *> > InvBlockGTraits;
- //typedef BlockGTraits::succ_iterator InstructionIterator;
- typedef typename BlockT::iterator InstrIterator;
-
- typedef CFGStructTraits<PassT> CFGTraits;
- typedef BlockInformation<InstrT> BlockInfo;
- typedef std::map<BlockT *, BlockInfo *> BlockInfoMap;
-
- typedef int RegiT;
- typedef typename PassT::LoopType LoopT;
- typedef LandInformation<BlockT, InstrT, RegiT> LoopLandInfo;
- typedef std::map<LoopT *, LoopLandInfo *> LoopLandInfoMap;
- //landing info for loop break
- typedef SmallVector<BlockT *, 32> BlockTSmallerVector;
+ static char ID;
-public:
- CFGStructurizer();
- ~CFGStructurizer();
+ AMDGPUCFGStructurizer(TargetMachine &tm) :
+ MachineFunctionPass(ID), TM(tm),
+ TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
+ TRI(&TII->getRegisterInfo()) { }
+
+ const char *getPassName() const {
+ return "AMD IL Control Flow Graph structurizer Pass";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<MachineFunctionAnalysis>();
+ AU.addRequired<MachineFunctionAnalysis>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<MachinePostDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ }
/// Perform the CFG structurization
- bool run(FuncT &Func, PassT &Pass, const AMDGPURegisterInfo *tri);
+ bool run();
/// Perform the CFG preparation
- bool prepare(FuncT &Func, PassT &Pass, const AMDGPURegisterInfo *tri);
+  /// This step will remove every unconditional/dead jump instruction and make
+  /// sure all loops have an exit block
+ bool prepare();
+
+ bool runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(MF.dump(););
+ OrderedBlks.clear();
+ FuncRep = &MF;
+ MLI = &getAnalysis<MachineLoopInfo>();
+ DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
+ MDT = &getAnalysis<MachineDominatorTree>();
+ DEBUG(MDT->print(dbgs(), (const llvm::Module*)0););
+ PDT = &getAnalysis<MachinePostDominatorTree>();
+ DEBUG(PDT->print(dbgs()););
+ prepare();
+ run();
+ DEBUG(MF.dump(););
+ return true;
+ }
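+
+  // In outline: the analyses fetched above feed prepare(), which
+  // canonicalizes the CFG (dummy exits, branch cleanup), and run(), which
+  // then iteratively pattern-matches the CFG down to one structured block.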
-private:
- void reversePredicateSetter(typename BlockT::iterator);
- void orderBlocks();
- void printOrderedBlocks(llvm::raw_ostream &OS);
- int patternMatch(BlockT *CurBlock);
- int patternMatchGroup(BlockT *CurBlock);
-
- int serialPatternMatch(BlockT *CurBlock);
- int ifPatternMatch(BlockT *CurBlock);
- int switchPatternMatch(BlockT *CurBlock);
- int loopendPatternMatch(BlockT *CurBlock);
- int loopPatternMatch(BlockT *CurBlock);
-
- int loopbreakPatternMatch(LoopT *LoopRep, BlockT *LoopHeader);
- int loopcontPatternMatch(LoopT *LoopRep, BlockT *LoopHeader);
- //int loopWithoutBreak(BlockT *);
-
- void handleLoopbreak (BlockT *ExitingBlock, LoopT *ExitingLoop,
- BlockT *ExitBlock, LoopT *exitLoop, BlockT *landBlock);
- void handleLoopcontBlock(BlockT *ContingBlock, LoopT *contingLoop,
- BlockT *ContBlock, LoopT *contLoop);
- bool isSameloopDetachedContbreak(BlockT *Src1Block, BlockT *Src2Block);
- int handleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock);
- int handleJumpintoIfImp(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock);
- int improveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock, BlockT **LandBlockPtr);
- void showImproveSimpleJumpintoIf(BlockT *HeadBlock, BlockT *TrueBlock,
- BlockT *FalseBlock, BlockT *LandBlock,
- bool Detail = false);
- PathToKind singlePathTo(BlockT *SrcBlock, BlockT *DstBlock,
- bool AllowSideEntry = true);
- BlockT *singlePathEnd(BlockT *srcBlock, BlockT *DstBlock,
- bool AllowSideEntry = true);
- int cloneOnSideEntryTo(BlockT *PreBlock, BlockT *SrcBlock, BlockT *DstBlock);
- void mergeSerialBlock(BlockT *DstBlock, BlockT *srcBlock);
-
- void mergeIfthenelseBlock(InstrT *BranchInstr, BlockT *CurBlock,
- BlockT *TrueBlock, BlockT *FalseBlock,
- BlockT *LandBlock);
- void mergeLooplandBlock(BlockT *DstBlock, LoopLandInfo *LoopLand);
- void mergeLoopbreakBlock(BlockT *ExitingBlock, BlockT *ExitBlock,
- BlockT *ExitLandBlock, RegiT SetReg);
- void settleLoopcontBlock(BlockT *ContingBlock, BlockT *ContBlock,
- RegiT SetReg);
- BlockT *relocateLoopcontBlock(LoopT *ParentLoopRep, LoopT *LoopRep,
- std::set<BlockT*> &ExitBlockSet,
- BlockT *ExitLandBlk);
- BlockT *addLoopEndbranchBlock(LoopT *LoopRep,
- BlockTSmallerVector &ExitingBlocks,
- BlockTSmallerVector &ExitBlocks);
- BlockT *normalizeInfiniteLoopExit(LoopT *LoopRep);
- void removeUnconditionalBranch(BlockT *SrcBlock);
- void removeRedundantConditionalBranch(BlockT *SrcBlock);
- void addDummyExitBlock(SmallVector<BlockT *, DEFAULT_VEC_SLOTS> &RetBlocks);
-
- void removeSuccessor(BlockT *SrcBlock);
- BlockT *cloneBlockForPredecessor(BlockT *CurBlock, BlockT *PredBlock);
- BlockT *exitingBlock2ExitBlock (LoopT *LoopRep, BlockT *exitingBlock);
-
- void migrateInstruction(BlockT *SrcBlock, BlockT *DstBlock,
- InstrIterator InsertPos);
-
- void recordSccnum(BlockT *SrcBlock, int SCCNum);
- int getSCCNum(BlockT *srcBlk);
-
- void retireBlock(BlockT *DstBlock, BlockT *SrcBlock);
- bool isRetiredBlock(BlockT *SrcBlock);
- bool isActiveLoophead(BlockT *CurBlock);
- bool needMigrateBlock(BlockT *Block);
-
- BlockT *recordLoopLandBlock(LoopT *LoopRep, BlockT *LandBlock,
- BlockTSmallerVector &exitBlocks,
- std::set<BlockT*> &ExitBlockSet);
- void setLoopLandBlock(LoopT *LoopRep, BlockT *Block = NULL);
- BlockT *getLoopLandBlock(LoopT *LoopRep);
- LoopLandInfo *getLoopLandInfo(LoopT *LoopRep);
-
- void addLoopBreakOnReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopContOnReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopBreakInitReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopContInitReg(LoopT *LoopRep, RegiT RegNum);
- void addLoopEndbranchInitReg(LoopT *LoopRep, RegiT RegNum);
-
- bool hasBackEdge(BlockT *curBlock);
- unsigned getLoopDepth (LoopT *LoopRep);
- int countActiveBlock(
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterStart,
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator IterEnd);
- BlockT *findNearestCommonPostDom(std::set<BlockT *>&);
- BlockT *findNearestCommonPostDom(BlockT *Block1, BlockT *Block2);
+protected:
+ TargetMachine &TM;
+ MachineDominatorTree *MDT;
+ MachinePostDominatorTree *PDT;
+ MachineLoopInfo *MLI;
+ const R600InstrInfo *TII;
+ const AMDGPURegisterInfo *TRI;
+
+ // PRINT FUNCTIONS
+ /// Print the ordered Blocks.
+ void printOrderedBlocks() const {
+ size_t i = 0;
+ for (MBBVector::const_iterator iterBlk = OrderedBlks.begin(),
+ iterBlkEnd = OrderedBlks.end(); iterBlk != iterBlkEnd; ++iterBlk, ++i) {
+ dbgs() << "BB" << (*iterBlk)->getNumber();
+ dbgs() << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
+ if (i != 0 && i % 10 == 0) {
+ dbgs() << "\n";
+ } else {
+ dbgs() << " ";
+ }
+ }
+ }
+ static void PrintLoopinfo(const MachineLoopInfo &LoopInfo) {
+ for (MachineLoop::iterator iter = LoopInfo.begin(),
+ iterEnd = LoopInfo.end(); iter != iterEnd; ++iter) {
+ (*iter)->print(dbgs(), 0);
+ }
+ }
+
+ // UTILITY FUNCTIONS
+ int getSCCNum(MachineBasicBlock *MBB) const;
+ MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const;
+ bool hasBackEdge(MachineBasicBlock *MBB) const;
+ static unsigned getLoopDepth(MachineLoop *LoopRep);
+ bool isRetiredBlock(MachineBasicBlock *MBB) const;
+ bool isActiveLoophead(MachineBasicBlock *MBB) const;
+ PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
+ bool AllowSideEntry = true) const;
+ int countActiveBlock(MBBVector::const_iterator It,
+ MBBVector::const_iterator E) const;
+ bool needMigrateBlock(MachineBasicBlock *MBB) const;
+
+ // Utility Functions
+ void reversePredicateSetter(MachineBasicBlock::iterator I);
+ /// Compute the reversed DFS post order of Blocks
+ void orderBlocks(MachineFunction *MF);
+
+  // Functions originally from CFGStructTraits
+ void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
+ DebugLoc DL = DebugLoc());
+ MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
+ DebugLoc DL = DebugLoc());
+ MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
+ void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
+ DebugLoc DL);
+ void insertCondBranchBefore(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
+ DebugLoc DL);
+ void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
+ static int getBranchNzeroOpcode(int OldOpcode);
+ static int getBranchZeroOpcode(int OldOpcode);
+ static int getContinueNzeroOpcode(int OldOpcode);
+ static int getContinueZeroOpcode(int OldOpcode);
+ static MachineBasicBlock *getTrueBranch(MachineInstr *MI);
+ static void setTrueBranch(MachineInstr *MI, MachineBasicBlock *MBB);
+ static MachineBasicBlock *getFalseBranch(MachineBasicBlock *MBB,
+ MachineInstr *MI);
+ static bool isCondBranch(MachineInstr *MI);
+ static bool isUncondBranch(MachineInstr *MI);
+ static DebugLoc getLastDebugLocInBB(MachineBasicBlock *MBB);
+ static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *MBB);
+ /// The correct naming for this is getPossibleLoopendBlockBranchInstr.
+ ///
+  /// A BB with a backward edge could have move instructions after the branch
+  /// instruction; such move instructions "belong to" the loop's backward edge.
+ MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB);
+ static MachineInstr *getReturnInstr(MachineBasicBlock *MBB);
+ static MachineInstr *getContinueInstr(MachineBasicBlock *MBB);
+ static bool isReturnBlock(MachineBasicBlock *MBB);
+ static void cloneSuccessorList(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB) ;
+ static MachineBasicBlock *clone(MachineBasicBlock *MBB);
+  /// MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose here
+  /// because the AMDGPU branch instruction is not recognized as a terminator;
+  /// fix that and retire this routine.
+ void replaceInstrUseOfBlockWith(MachineBasicBlock *SrcMBB,
+ MachineBasicBlock *OldMBB, MachineBasicBlock *NewBlk);
+ static void wrapup(MachineBasicBlock *MBB);
+
+
+ int patternMatch(MachineBasicBlock *MBB);
+ int patternMatchGroup(MachineBasicBlock *MBB);
+ int serialPatternMatch(MachineBasicBlock *MBB);
+ int ifPatternMatch(MachineBasicBlock *MBB);
+ int loopendPatternMatch();
+ int mergeLoop(MachineLoop *LoopRep);
+ int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader);
+
+ void handleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
+ MachineLoop *ContLoop);
+  /// Return true iff Src1MBB->succ_size() == 0 and Src1MBB and Src2MBB are in
+  /// the same loop that has LoopLandInfo. Without explicitly keeping track of
+  /// loopContBlks and loopBreakBlks, this is a way to recover that information.
+ bool isSameloopDetachedContbreak(MachineBasicBlock *Src1MBB,
+ MachineBasicBlock *Src2MBB);
+ int handleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
+ int handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
+ int improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
+ MachineBasicBlock **LandMBBPtr);
+ void showImproveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
+ MachineBasicBlock *LandMBB, bool Detail = false);
+ int cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB);
+ void mergeSerialBlock(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB);
+
+ void mergeIfthenelseBlock(MachineInstr *BranchMI,
+ MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB);
+ void mergeLooplandBlock(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *LandMBB);
+ void mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
+ MachineBasicBlock *LandMBB);
+ void settleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineBasicBlock *ContMBB);
+  /// normalizeInfiniteLoopExit changes
+ /// B1:
+ /// uncond_br LoopHeader
+ ///
+ /// to
+ /// B1:
+ /// cond_br 1 LoopHeader dummyExit
+  /// and returns the newly added dummy exit block.
+ MachineBasicBlock *normalizeInfiniteLoopExit(MachineLoop *LoopRep);
+ void removeUnconditionalBranch(MachineBasicBlock *MBB);
+  /// Remove duplicate branch instructions from a block.
+ /// For instance
+ /// B0:
+ /// cond_br X B1 B2
+ /// cond_br X B1 B2
+ /// is transformed to
+ /// B0:
+ /// cond_br X B1 B2
+ void removeRedundantConditionalBranch(MachineBasicBlock *MBB);
+ void addDummyExitBlock(SmallVectorImpl<MachineBasicBlock *> &RetMBB);
+ void removeSuccessor(MachineBasicBlock *MBB);
+ MachineBasicBlock *cloneBlockForPredecessor(MachineBasicBlock *MBB,
+ MachineBasicBlock *PredMBB);
+ void migrateInstruction(MachineBasicBlock *SrcMBB,
+ MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
+ void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
+ void retireBlock(MachineBasicBlock *MBB);
+ void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = NULL);
+
+ MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
+  /// This is a workaround for findNearestCommonDominator not being available
+  /// for post-dominators; a proper fix should go into Dominators.h.
+ MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1,
+ MachineBasicBlock *MBB2);
private:
- DomTreeT *domTree;
- PostDomTreeT *postDomTree;
- LoopInfoT *loopInfo;
- PassT *passRep;
- FuncT *funcRep;
-
- BlockInfoMap blockInfoMap;
- LoopLandInfoMap loopLandInfoMap;
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> orderedBlks;
- const AMDGPURegisterInfo *TRI;
+ MBBInfoMap BlockInfoMap;
+ LoopLandInfoMap LLInfoMap;
+ std::map<MachineLoop *, bool> Visited;
+ MachineFunction *FuncRep;
+ SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> OrderedBlks;
+};
+
+int AMDGPUCFGStructurizer::getSCCNum(MachineBasicBlock *MBB) const {
+ MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
+ if (It == BlockInfoMap.end())
+ return INVALIDSCCNUM;
+ return (*It).second->SccNum;
+}
-}; //template class CFGStructurizer
+MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep)
+ const {
+ LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep);
+ if (It == LLInfoMap.end())
+ return NULL;
+ return (*It).second;
+}
+
+bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const {
+ MachineLoop *LoopRep = MLI->getLoopFor(MBB);
+ if (!LoopRep)
+ return false;
+ MachineBasicBlock *LoopHeader = LoopRep->getHeader();
+ return MBB->isSuccessor(LoopHeader);
+}
+
+unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) {
+ return LoopRep ? LoopRep->getLoopDepth() : 0;
+}
-template<class PassT> CFGStructurizer<PassT>::CFGStructurizer()
- : domTree(NULL), postDomTree(NULL), loopInfo(NULL) {
+bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const {
+ MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
+ if (It == BlockInfoMap.end())
+ return false;
+ return (*It).second->IsRetired;
}
-template<class PassT> CFGStructurizer<PassT>::~CFGStructurizer() {
- for (typename BlockInfoMap::iterator I = blockInfoMap.begin(),
- E = blockInfoMap.end(); I != E; ++I) {
- delete I->second;
+bool AMDGPUCFGStructurizer::isActiveLoophead(MachineBasicBlock *MBB) const {
+ MachineLoop *LoopRep = MLI->getLoopFor(MBB);
+ while (LoopRep && LoopRep->getHeader() == MBB) {
+ MachineBasicBlock *LoopLand = getLoopLandInfo(LoopRep);
+    if (!LoopLand)
+ return true;
+ if (!isRetiredBlock(LoopLand))
+ return true;
+ LoopRep = LoopRep->getParentLoop();
}
+ return false;
+}
+AMDGPUCFGStructurizer::PathToKind AMDGPUCFGStructurizer::singlePathTo(
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
+ bool AllowSideEntry) const {
+ assert(DstMBB);
+ if (SrcMBB == DstMBB)
+ return SinglePath_InPath;
+ while (SrcMBB && SrcMBB->succ_size() == 1) {
+ SrcMBB = *SrcMBB->succ_begin();
+ if (SrcMBB == DstMBB)
+ return SinglePath_InPath;
+ if (!AllowSideEntry && SrcMBB->pred_size() > 1)
+ return Not_SinglePath;
+ }
+  if (SrcMBB && SrcMBB->succ_size() == 0)
+ return SinglePath_NotInPath;
+ return Not_SinglePath;
}
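+
+// A sketch of the three outcomes, following unique successors from SrcMBB:
+//   SrcMBB -> A -> DstMBB            : SinglePath_InPath
+//   SrcMBB -> A -> B (no successor)  : SinglePath_NotInPath
+//   any fan-out along the way        : Not_SinglePath
+// With AllowSideEntry == false, a block on the path with more than one
+// predecessor also yields Not_SinglePath.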
-template<class PassT>
-bool CFGStructurizer<PassT>::prepare(FuncT &func, PassT &pass,
- const AMDGPURegisterInfo * tri) {
- passRep = &pass;
- funcRep = &func;
- TRI = tri;
+int AMDGPUCFGStructurizer::countActiveBlock(MBBVector::const_iterator It,
+ MBBVector::const_iterator E) const {
+ int Count = 0;
+ while (It != E) {
+ if (!isRetiredBlock(*It))
+ ++Count;
+ ++It;
+ }
+ return Count;
+}
- bool changed = false;
+bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const {
+ unsigned BlockSizeThreshold = 30;
+ unsigned CloneInstrThreshold = 100;
+ bool MultiplePreds = MBB && (MBB->pred_size() > 1);
- //FIXME: if not reducible flow graph, make it so ???
+  if (!MultiplePreds)
+ return false;
+ unsigned BlkSize = MBB->size();
+ return ((BlkSize > BlockSizeThreshold) &&
+ (BlkSize * (MBB->pred_size() - 1) > CloneInstrThreshold));
+}
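+
+// A worked example of the heuristic above: a 40-instruction block with
+// 3 predecessors gives 40 * (3 - 1) = 80 <= 100, so it is not migrated;
+// with 4 predecessors, 40 * 3 = 120 > 100 and the block is reported as
+// needing migration.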
- if (DEBUGME) {
- errs() << "AMDGPUCFGStructurizer::prepare\n";
+void AMDGPUCFGStructurizer::reversePredicateSetter(
+ MachineBasicBlock::iterator I) {
+ while (I--) {
+ if (I->getOpcode() == AMDGPU::PRED_X) {
+ switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
+ case OPCODE_IS_ZERO_INT:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_NOT_ZERO_INT);
+ return;
+ case OPCODE_IS_NOT_ZERO_INT:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_ZERO_INT);
+ return;
+ case OPCODE_IS_ZERO:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_NOT_ZERO);
+ return;
+ case OPCODE_IS_NOT_ZERO:
+ static_cast<MachineInstr *>(I)->getOperand(2)
+ .setImm(OPCODE_IS_ZERO);
+ return;
+ default:
+ llvm_unreachable("PRED_X Opcode invalid!");
+ }
+ }
}
+}
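+
+// E.g. a PRED_X whose condition is OPCODE_IS_ZERO_INT is rewritten to
+// OPCODE_IS_NOT_ZERO_INT (and vice versa), logically negating the branch
+// predicate without rebuilding the branch instruction itself.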
+
+void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
+ int NewOpcode, DebugLoc DL) {
+ MachineInstr *MI = MBB->getParent()
+ ->CreateMachineInstr(TII->get(NewOpcode), DL);
+ MBB->push_back(MI);
+  // Assume the instruction doesn't take any reg operand ...
+ SHOWNEWINSTR(MI);
+}
+
+MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
+ int NewOpcode, DebugLoc DL) {
+ MachineInstr *MI =
+ MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
+ if (MBB->begin() != MBB->end())
+ MBB->insert(MBB->begin(), MI);
+ else
+ MBB->push_back(MI);
+ SHOWNEWINSTR(MI);
+ return MI;
+}
+
+MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
+ MachineBasicBlock::iterator I, int NewOpcode) {
+ MachineInstr *OldMI = &(*I);
+ MachineBasicBlock *MBB = OldMI->getParent();
+  MachineInstr *NewMI =
+      MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
+  MBB->insert(I, NewMI);
+  // Assume the instruction doesn't take any reg operand ...
+  SHOWNEWINSTR(NewMI);
+  return NewMI;
+}
+
+void AMDGPUCFGStructurizer::insertCondBranchBefore(
+ MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
+ MachineInstr *OldMI = &(*I);
+ MachineBasicBlock *MBB = OldMI->getParent();
+ MachineFunction *MF = MBB->getParent();
+ MachineInstr *NewMI = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
+ MBB->insert(I, NewMI);
+ MachineInstrBuilder MIB(*MF, NewMI);
+ MIB.addReg(OldMI->getOperand(1).getReg(), false);
+ SHOWNEWINSTR(NewMI);
+ //erase later oldInstr->eraseFromParent();
+}
+
+void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *MBB,
+    MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
+    DebugLoc DL) {
+  MachineFunction *MF = MBB->getParent();
+  MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
+  // Insert before I.
+  MBB->insert(I, NewInstr);
+ MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
+ SHOWNEWINSTR(NewInstr);
+}
- loopInfo = CFGTraits::getLoopInfo(pass);
- if (DEBUGME) {
- errs() << "LoopInfo:\n";
- PrintLoopinfo(*loopInfo, errs());
+void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB,
+ int NewOpcode, int RegNum) {
+ MachineFunction *MF = MBB->getParent();
+ MachineInstr *NewInstr =
+ MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
+ MBB->push_back(NewInstr);
+ MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
+ SHOWNEWINSTR(NewInstr);
+}
+
+int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) {
+  switch (OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
+ case AMDGPU::BRANCH_COND_i32:
+ case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
+ default: llvm_unreachable("internal error");
}
+ return -1;
+}
- orderBlocks();
- if (DEBUGME) {
- errs() << "Ordered blocks:\n";
- printOrderedBlocks(errs());
+int AMDGPUCFGStructurizer::getBranchZeroOpcode(int OldOpcode) {
+  switch (OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
+ case AMDGPU::BRANCH_COND_i32:
+ case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
+ default: llvm_unreachable("internal error");
}
+ return -1;
+}
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> retBlks;
-
- for (typename LoopInfoT::iterator iter = loopInfo->begin(),
- iterEnd = loopInfo->end();
- iter != iterEnd; ++iter) {
- LoopT* loopRep = (*iter);
- BlockTSmallerVector exitingBlks;
- loopRep->getExitingBlocks(exitingBlks);
-
- if (exitingBlks.size() == 0) {
- BlockT* dummyExitBlk = normalizeInfiniteLoopExit(loopRep);
- if (dummyExitBlk != NULL)
- retBlks.push_back(dummyExitBlk);
- }
+int AMDGPUCFGStructurizer::getContinueNzeroOpcode(int OldOpcode) {
+  switch (OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
+ default: llvm_unreachable("internal error");
+  }
+ return -1;
+}
+
+int AMDGPUCFGStructurizer::getContinueZeroOpcode(int OldOpcode) {
+  switch (OldOpcode) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
+ default: llvm_unreachable("internal error");
}
+ return -1;
+}
- // Remove unconditional branch instr.
- // Add dummy exit block iff there are multiple returns.
+MachineBasicBlock *AMDGPUCFGStructurizer::getTrueBranch(MachineInstr *MI) {
+ return MI->getOperand(0).getMBB();
+}
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlk = orderedBlks.begin(), iterEndBlk = orderedBlks.end();
- iterBlk != iterEndBlk;
- ++iterBlk) {
- BlockT *curBlk = *iterBlk;
- removeUnconditionalBranch(curBlk);
- removeRedundantConditionalBranch(curBlk);
- if (CFGTraits::isReturnBlock(curBlk)) {
- retBlks.push_back(curBlk);
+void AMDGPUCFGStructurizer::setTrueBranch(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ MI->getOperand(0).setMBB(MBB);
+}
+
+MachineBasicBlock *
+AMDGPUCFGStructurizer::getFalseBranch(MachineBasicBlock *MBB,
+ MachineInstr *MI) {
+ assert(MBB->succ_size() == 2);
+ MachineBasicBlock *TrueBranch = getTrueBranch(MI);
+ MachineBasicBlock::succ_iterator It = MBB->succ_begin();
+ MachineBasicBlock::succ_iterator Next = It;
+ ++Next;
+ return (*It == TrueBranch) ? *Next : *It;
+}
+
+bool AMDGPUCFGStructurizer::isCondBranch(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AMDGPU::JUMP_COND:
+ case AMDGPU::BRANCH_COND_i32:
+ case AMDGPU::BRANCH_COND_f32: return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AMDGPU::JUMP:
+ case AMDGPU::BRANCH:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) {
+  // Get the DebugLoc of the last instruction in the block that has one.
+ DebugLoc DL;
+ for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end();
+ ++It) {
+ MachineInstr *instr = &(*It);
+    if (!instr->getDebugLoc().isUnknown())
+ DL = instr->getDebugLoc();
+ }
+ return DL;
+}
+
+MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr(
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock::reverse_iterator It = MBB->rbegin();
+ MachineInstr *MI = &*It;
+ if (MI && (isCondBranch(MI) || isUncondBranch(MI)))
+ return MI;
+ return NULL;
+}
+
+MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
+ MachineBasicBlock *MBB) {
+ for (MachineBasicBlock::reverse_iterator It = MBB->rbegin(), E = MBB->rend();
+ It != E; ++It) {
+ // FIXME: Simplify
+ MachineInstr *MI = &*It;
+ if (MI) {
+ if (isCondBranch(MI) || isUncondBranch(MI))
+ return MI;
+ else if (!TII->isMov(MI->getOpcode()))
+ break;
}
- assert(curBlk->succ_size() <= 2);
- } //for
+ }
+ return NULL;
+}
- if (retBlks.size() >= 2) {
- addDummyExitBlock(retBlks);
- changed = true;
+MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
+ MachineBasicBlock::reverse_iterator It = MBB->rbegin();
+ if (It != MBB->rend()) {
+ MachineInstr *instr = &(*It);
+ if (instr->getOpcode() == AMDGPU::RETURN)
+ return instr;
}
+ return NULL;
+}
- return changed;
-} //CFGStructurizer::prepare
+MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
+ MachineBasicBlock::reverse_iterator It = MBB->rbegin();
+ if (It != MBB->rend()) {
+ MachineInstr *MI = &(*It);
+ if (MI->getOpcode() == AMDGPU::CONTINUE)
+ return MI;
+ }
+ return NULL;
+}
-template<class PassT>
-bool CFGStructurizer<PassT>::run(FuncT &func, PassT &pass,
- const AMDGPURegisterInfo * tri) {
- passRep = &pass;
- funcRep = &func;
- TRI = tri;
+bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
+ MachineInstr *MI = getReturnInstr(MBB);
+ bool IsReturn = (MBB->succ_size() == 0);
+ if (MI)
+ assert(IsReturn);
+ else if (IsReturn)
+ DEBUG(
+ dbgs() << "BB" << MBB->getNumber()
+ <<" is return block without RETURN instr\n";);
+ return IsReturn;
+}
- //Assume reducible CFG...
- if (DEBUGME) {
- errs() << "AMDGPUCFGStructurizer::run\n";
- func.viewCFG();
+void AMDGPUCFGStructurizer::cloneSuccessorList(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB) {
+ for (MachineBasicBlock::succ_iterator It = SrcMBB->succ_begin(),
+ iterEnd = SrcMBB->succ_end(); It != iterEnd; ++It)
+    DstMBB->addSuccessor(*It);  // *It's predecessor is also taken care of.
+}
+
+MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) {
+ MachineFunction *Func = MBB->getParent();
+ MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock();
+  Func->push_back(NewMBB); // Insert into the function.
+ for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end();
+ It != E; ++It) {
+ MachineInstr *MI = Func->CloneMachineInstr(It);
+ NewMBB->push_back(MI);
}
+ return NewMBB;
+}
+
+void AMDGPUCFGStructurizer::replaceInstrUseOfBlockWith(
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *OldMBB,
+ MachineBasicBlock *NewBlk) {
+ MachineInstr *BranchMI = getLoopendBlockBranchInstr(SrcMBB);
+ if (BranchMI && isCondBranch(BranchMI) &&
+ getTrueBranch(BranchMI) == OldMBB)
+ setTrueBranch(BranchMI, NewBlk);
+}
+
+void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
+ assert((!MBB->getParent()->getJumpTableInfo()
+ || MBB->getParent()->getJumpTableInfo()->isEmpty())
+ && "found a jump table");
+
+  // Collect the CONTINUE instructions that appear right before an ENDLOOP.
+ SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> ContInstr;
+ MachineBasicBlock::iterator Pre = MBB->begin();
+ MachineBasicBlock::iterator E = MBB->end();
+ MachineBasicBlock::iterator It = Pre;
+ while (It != E) {
+ if (Pre->getOpcode() == AMDGPU::CONTINUE
+ && It->getOpcode() == AMDGPU::ENDLOOP)
+ ContInstr.push_back(Pre);
+ Pre = It;
+ ++It;
+ }
+
+  // Delete each CONTINUE collected above.
+ for (unsigned i = 0; i < ContInstr.size(); ++i)
+ ContInstr[i]->eraseFromParent();
+
+  // TODO: Fix up the jump table so a later phase won't be confused. If
+  // (jumpTableInfo->isEmpty() == false) the jump table needs cleaning, but
+  // there isn't such an interface yet; alternatively, replace all the other
+  // blocks in the jump table with the entry block.
+
+}
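+
+// E.g. a block tail of the form
+//   CONTINUE
+//   ENDLOOP
+// becomes just
+//   ENDLOOP
+// since a continue issued immediately before the loop end is a no-op.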
+
+
+bool AMDGPUCFGStructurizer::prepare() {
+ bool Changed = false;
+
+  // FIXME: if the flow graph is not reducible, make it so ???
+
+ DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
- domTree = CFGTraits::getDominatorTree(pass);
- if (DEBUGME) {
- domTree->print(errs(), (const llvm::Module*)0);
+ orderBlocks(FuncRep);
+
+ SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> RetBlks;
+
+  // Add an ExitBlk to loops that don't have one.
+ for (MachineLoopInfo::iterator It = MLI->begin(),
+ E = MLI->end(); It != E; ++It) {
+ MachineLoop *LoopRep = (*It);
+ MBBVector ExitingMBBs;
+ LoopRep->getExitingBlocks(ExitingMBBs);
+
+ if (ExitingMBBs.size() == 0) {
+ MachineBasicBlock* DummyExitBlk = normalizeInfiniteLoopExit(LoopRep);
+ if (DummyExitBlk)
+ RetBlks.push_back(DummyExitBlk);
+ }
}
- postDomTree = CFGTraits::getPostDominatorTree(pass);
- if (DEBUGME) {
- postDomTree->print(errs());
+ // Remove unconditional branch instr.
+ // Add dummy exit block iff there are multiple returns.
+ for (SmallVectorImpl<MachineBasicBlock *>::const_iterator
+ It = OrderedBlks.begin(), E = OrderedBlks.end(); It != E; ++It) {
+ MachineBasicBlock *MBB = *It;
+ removeUnconditionalBranch(MBB);
+ removeRedundantConditionalBranch(MBB);
+ if (isReturnBlock(MBB)) {
+ RetBlks.push_back(MBB);
+ }
+ assert(MBB->succ_size() <= 2);
}
- loopInfo = CFGTraits::getLoopInfo(pass);
- if (DEBUGME) {
- errs() << "LoopInfo:\n";
- PrintLoopinfo(*loopInfo, errs());
+ if (RetBlks.size() >= 2) {
+ addDummyExitBlock(RetBlks);
+ Changed = true;
}
- orderBlocks();
+ return Changed;
+}
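+
+// E.g. a function with two RETURN blocks R1 and R2 leaves prepare() with,
+// roughly, a single fresh exit block E such that R1 -> E and R2 -> E, so the
+// structurizer can treat the CFG as single-exit.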
+
+bool AMDGPUCFGStructurizer::run() {
+
+ //Assume reducible CFG...
+ DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n";FuncRep->viewCFG(););
+
#ifdef STRESSTEST
  //Use the worst block ordering to test the algorithm.
ReverseVector(orderedBlks);
#endif
- if (DEBUGME) {
- errs() << "Ordered blocks:\n";
- printOrderedBlocks(errs());
- }
- int numIter = 0;
- bool finish = false;
- BlockT *curBlk;
- bool makeProgress = false;
- int numRemainedBlk = countActiveBlock(orderedBlks.begin(),
- orderedBlks.end());
+ DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
+ int NumIter = 0;
+ bool Finish = false;
+ MachineBasicBlock *MBB;
+ bool MakeProgress = false;
+ int NumRemainedBlk = countActiveBlock(OrderedBlks.begin(),
+ OrderedBlks.end());
do {
- ++numIter;
- if (DEBUGME) {
- errs() << "numIter = " << numIter
- << ", numRemaintedBlk = " << numRemainedBlk << "\n";
- }
-
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlk = orderedBlks.begin();
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlkEnd = orderedBlks.end();
-
- typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- sccBeginIter = iterBlk;
- BlockT *sccBeginBlk = NULL;
- int sccNumBlk = 0; // The number of active blocks, init to a
+ ++NumIter;
+ DEBUG(
+ dbgs() << "numIter = " << NumIter
+ << ", numRemaintedBlk = " << NumRemainedBlk << "\n";
+ );
+
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator It =
+ OrderedBlks.begin();
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator E =
+ OrderedBlks.end();
+
+ SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter =
+ It;
+ MachineBasicBlock *SccBeginMBB = NULL;
+ int SccNumBlk = 0; // The number of active blocks, init to a
// maximum possible number.
- int sccNumIter; // Number of iteration in this SCC.
-
- while (iterBlk != iterBlkEnd) {
- curBlk = *iterBlk;
-
- if (sccBeginBlk == NULL) {
- sccBeginIter = iterBlk;
- sccBeginBlk = curBlk;
- sccNumIter = 0;
- sccNumBlk = numRemainedBlk; // Init to maximum possible number.
- if (DEBUGME) {
- errs() << "start processing SCC" << getSCCNum(sccBeginBlk);
- errs() << "\n";
- }
+ int SccNumIter; // Number of iteration in this SCC.
+
+ while (It != E) {
+ MBB = *It;
+
+ if (!SccBeginMBB) {
+ SccBeginIter = It;
+ SccBeginMBB = MBB;
+ SccNumIter = 0;
+ SccNumBlk = NumRemainedBlk; // Init to maximum possible number.
+ DEBUG(
+ dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
+ dbgs() << "\n";
+ );
}
- if (!isRetiredBlock(curBlk)) {
- patternMatch(curBlk);
- }
+ if (!isRetiredBlock(MBB))
+ patternMatch(MBB);
- ++iterBlk;
+ ++It;
- bool contNextScc = true;
- if (iterBlk == iterBlkEnd
- || getSCCNum(sccBeginBlk) != getSCCNum(*iterBlk)) {
+ bool ContNextScc = true;
+ if (It == E
+ || getSCCNum(SccBeginMBB) != getSCCNum(*It)) {
// Just finish one scc.
- ++sccNumIter;
- int sccRemainedNumBlk = countActiveBlock(sccBeginIter, iterBlk);
- if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= sccNumBlk) {
- if (DEBUGME) {
- errs() << "Can't reduce SCC " << getSCCNum(curBlk)
- << ", sccNumIter = " << sccNumIter;
- errs() << "doesn't make any progress\n";
- }
- contNextScc = true;
- } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < sccNumBlk) {
- sccNumBlk = sccRemainedNumBlk;
- iterBlk = sccBeginIter;
- contNextScc = false;
- if (DEBUGME) {
- errs() << "repeat processing SCC" << getSCCNum(curBlk)
- << "sccNumIter = " << sccNumIter << "\n";
- func.viewCFG();
- }
+ ++SccNumIter;
+ int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It);
+ if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) {
+ DEBUG(
+ dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
+ << ", sccNumIter = " << SccNumIter;
+ dbgs() << "doesn't make any progress\n";
+ );
+ ContNextScc = true;
+ } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) {
+ SccNumBlk = sccRemainedNumBlk;
+ It = SccBeginIter;
+ ContNextScc = false;
+ DEBUG(
+ dbgs() << "repeat processing SCC" << getSCCNum(MBB)
+ << "sccNumIter = " << SccNumIter << "\n";
+ FuncRep->viewCFG();
+ );
} else {
// Finish the current scc.
- contNextScc = true;
+ ContNextScc = true;
}
} else {
// Continue on next component in the current scc.
- contNextScc = false;
+ ContNextScc = false;
}
- if (contNextScc) {
- sccBeginBlk = NULL;
- }
+ if (ContNextScc)
+ SccBeginMBB = NULL;
} //while, "one iteration" over the function.
- BlockT *entryBlk = FuncGTraits::nodes_begin(&func);
- if (entryBlk->succ_size() == 0) {
- finish = true;
- if (DEBUGME) {
- errs() << "Reduce to one block\n";
- }
+ MachineBasicBlock *EntryMBB =
+ GraphTraits<MachineFunction *>::nodes_begin(FuncRep);
+ if (EntryMBB->succ_size() == 0) {
+ Finish = true;
+ DEBUG(
+ dbgs() << "Reduce to one block\n";
+ );
} else {
- int newnumRemainedBlk
- = countActiveBlock(orderedBlks.begin(), orderedBlks.end());
+ int NewnumRemainedBlk
+ = countActiveBlock(OrderedBlks.begin(), OrderedBlks.end());
// consider cloned blocks ??
- if (newnumRemainedBlk == 1 || newnumRemainedBlk < numRemainedBlk) {
- makeProgress = true;
- numRemainedBlk = newnumRemainedBlk;
+ if (NewnumRemainedBlk == 1 || NewnumRemainedBlk < NumRemainedBlk) {
+ MakeProgress = true;
+ NumRemainedBlk = NewnumRemainedBlk;
} else {
- makeProgress = false;
- if (DEBUGME) {
- errs() << "No progress\n";
- }
+ MakeProgress = false;
+ DEBUG(
+ dbgs() << "No progress\n";
+ );
}
}
- } while (!finish && makeProgress);
+ } while (!Finish && MakeProgress);
// Misc wrap up to maintain the consistency of the Function representation.
- CFGTraits::wrapup(FuncGTraits::nodes_begin(&func));
+ wrapup(GraphTraits<MachineFunction *>::nodes_begin(FuncRep));
// Detach retired Block, release memory.
- for (typename BlockInfoMap::iterator iterMap = blockInfoMap.begin(),
- iterEndMap = blockInfoMap.end(); iterMap != iterEndMap; ++iterMap) {
- if ((*iterMap).second && (*iterMap).second->isRetired) {
- assert(((*iterMap).first)->getNumber() != -1);
- if (DEBUGME) {
- errs() << "Erase BB" << ((*iterMap).first)->getNumber() << "\n";
- }
- (*iterMap).first->eraseFromParent(); //Remove from the parent Function.
+ for (MBBInfoMap::iterator It = BlockInfoMap.begin(), E = BlockInfoMap.end();
+ It != E; ++It) {
+ if ((*It).second && (*It).second->IsRetired) {
+ assert(((*It).first)->getNumber() != -1);
+ DEBUG(
+ dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";
+ );
+ (*It).first->eraseFromParent(); //Remove from the parent Function.
}
- delete (*iterMap).second;
+ delete (*It).second;
}
- blockInfoMap.clear();
+ BlockInfoMap.clear();
+ LLInfoMap.clear();
- // clear loopLandInfoMap
- for (typename LoopLandInfoMap::iterator iterMap = loopLandInfoMap.begin(),
- iterEndMap = loopLandInfoMap.end(); iterMap != iterEndMap; ++iterMap) {
- delete (*iterMap).second;
- }
- loopLandInfoMap.clear();
+ DEBUG(
+ FuncRep->viewCFG();
+ );
- if (DEBUGME) {
- func.viewCFG();
- }
-
- if (!finish) {
- assert(!"IRREDUCIBL_CF");
- }
+ if (!Finish)
+ llvm_unreachable("IRREDUCIBL_CF");
return true;
-} //CFGStructurizer::run
-
-/// Print the ordered Blocks.
-///
-template<class PassT>
-void CFGStructurizer<PassT>::printOrderedBlocks(llvm::raw_ostream &os) {
- size_t i = 0;
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::const_iterator
- iterBlk = orderedBlks.begin(), iterBlkEnd = orderedBlks.end();
- iterBlk != iterBlkEnd;
- ++iterBlk, ++i) {
- os << "BB" << (*iterBlk)->getNumber();
- os << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
- if (i != 0 && i % 10 == 0) {
- os << "\n";
- } else {
- os << " ";
- }
- }
-} //printOrderedBlocks
-
-/// Compute the reversed DFS post order of Blocks
-///
-template<class PassT> void CFGStructurizer<PassT>::orderBlocks() {
- int sccNum = 0;
- BlockT *bb;
- for (scc_iterator<FuncT *> sccIter = scc_begin(funcRep),
- sccEnd = scc_end(funcRep); sccIter != sccEnd; ++sccIter, ++sccNum) {
- std::vector<BlockT *> &sccNext = *sccIter;
- for (typename std::vector<BlockT *>::const_iterator
- blockIter = sccNext.begin(), blockEnd = sccNext.end();
+}
+
+
+
+void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) {
+ int SccNum = 0;
+ MachineBasicBlock *MBB;
+ for (scc_iterator<MachineFunction *> It = scc_begin(MF), E = scc_end(MF);
+ It != E; ++It, ++SccNum) {
+ std::vector<MachineBasicBlock *> &SccNext = *It;
+ for (std::vector<MachineBasicBlock *>::const_iterator
+ blockIter = SccNext.begin(), blockEnd = SccNext.end();
blockIter != blockEnd; ++blockIter) {
- bb = *blockIter;
- orderedBlks.push_back(bb);
- recordSccnum(bb, sccNum);
+ MBB = *blockIter;
+ OrderedBlks.push_back(MBB);
+ recordSccnum(MBB, SccNum);
}
}
   // Walk through all the blocks in the function to check for unreachable ones.
- for (BlockIterator blockIter1 = FuncGTraits::nodes_begin(funcRep),
- blockEnd1 = FuncGTraits::nodes_end(funcRep);
- blockIter1 != blockEnd1; ++blockIter1) {
- BlockT *bb = &(*blockIter1);
- sccNum = getSCCNum(bb);
- if (sccNum == INVALIDSCCNUM) {
- errs() << "unreachable block BB" << bb->getNumber() << "\n";
- }
+ typedef GraphTraits<MachineFunction *> GTM;
+ MachineFunction::iterator It = GTM::nodes_begin(MF), E = GTM::nodes_end(MF);
+ for (; It != E; ++It) {
+ MachineBasicBlock *MBB = &(*It);
+ SccNum = getSCCNum(MBB);
+ if (SccNum == INVALIDSCCNUM)
+ dbgs() << "unreachable block BB" << MBB->getNumber() << "\n";
}
-} //orderBlocks
+}
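+
+// Sketch: in a CFG with a two-block cycle {A, B}, A and B share one SccNum
+// while every acyclic block forms its own singleton SCC; run() relies on
+// these numbers to detect when its walk crosses from one SCC to the next.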
-template<class PassT> int CFGStructurizer<PassT>::patternMatch(BlockT *curBlk) {
- int numMatch = 0;
- int curMatch;
+int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) {
+ int NumMatch = 0;
+ int CurMatch;
- if (DEBUGME) {
- errs() << "Begin patternMatch BB" << curBlk->getNumber() << "\n";
- }
+ DEBUG(
+ dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";
+ );
- while ((curMatch = patternMatchGroup(curBlk)) > 0) {
- numMatch += curMatch;
- }
+ while ((CurMatch = patternMatchGroup(MBB)) > 0)
+ NumMatch += CurMatch;
- if (DEBUGME) {
- errs() << "End patternMatch BB" << curBlk->getNumber()
- << ", numMatch = " << numMatch << "\n";
- }
+ DEBUG(
+ dbgs() << "End patternMatch BB" << MBB->getNumber()
+ << ", numMatch = " << NumMatch << "\n";
+ );
+
+ return NumMatch;
+}
+
+int AMDGPUCFGStructurizer::patternMatchGroup(MachineBasicBlock *MBB) {
+ int NumMatch = 0;
+ NumMatch += loopendPatternMatch();
+ NumMatch += serialPatternMatch(MBB);
+ NumMatch += ifPatternMatch(MBB);
+ return NumMatch;
+}
- return numMatch;
-} //patternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::patternMatchGroup(BlockT *curBlk) {
- int numMatch = 0;
- numMatch += serialPatternMatch(curBlk);
- numMatch += ifPatternMatch(curBlk);
- numMatch += loopendPatternMatch(curBlk);
- numMatch += loopPatternMatch(curBlk);
- return numMatch;
-}//patternMatchGroup
-
-template<class PassT>
-int CFGStructurizer<PassT>::serialPatternMatch(BlockT *curBlk) {
- if (curBlk->succ_size() != 1) {
+
+int AMDGPUCFGStructurizer::serialPatternMatch(MachineBasicBlock *MBB) {
+ if (MBB->succ_size() != 1)
return 0;
- }
- BlockT *childBlk = *curBlk->succ_begin();
- if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk)) {
+ MachineBasicBlock *childBlk = *MBB->succ_begin();
+ if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk))
return 0;
- }
- mergeSerialBlock(curBlk, childBlk);
+ mergeSerialBlock(MBB, childBlk);
++numSerialPatternMatch;
return 1;
-} //serialPatternMatch
+}
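+
+// A sketch of the serial pattern: a chain A -> B where B is A's only
+// successor and A is B's only predecessor collapses into one block, e.g.
+//   A: i0; i1; br B        B: i2; ret
+// becomes
+//   A: i0; i1; i2; ret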
-template<class PassT>
-int CFGStructurizer<PassT>::ifPatternMatch(BlockT *curBlk) {
+int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
//two edges
- if (curBlk->succ_size() != 2) {
+ if (MBB->succ_size() != 2)
return 0;
- }
-
- if (hasBackEdge(curBlk)) {
+ if (hasBackEdge(MBB))
return 0;
- }
-
- InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(curBlk);
- if (branchInstr == NULL) {
+ MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
+ if (!BranchMI)
return 0;
- }
- assert(CFGTraits::isCondBranch(branchInstr));
+ assert(isCondBranch(BranchMI));
+ int NumMatch = 0;
- BlockT *trueBlk = CFGTraits::getTrueBranch(branchInstr);
- BlockT *falseBlk = CFGTraits::getFalseBranch(curBlk, branchInstr);
- BlockT *landBlk;
- int cloned = 0;
+ MachineBasicBlock *TrueMBB = getTrueBranch(BranchMI);
+ NumMatch += serialPatternMatch(TrueMBB);
+ NumMatch += ifPatternMatch(TrueMBB);
+ MachineBasicBlock *FalseMBB = getFalseBranch(MBB, BranchMI);
+ NumMatch += serialPatternMatch(FalseMBB);
+ NumMatch += ifPatternMatch(FalseMBB);
+ MachineBasicBlock *LandBlk;
+ int Cloned = 0;
+  assert(!TrueMBB->succ_empty() || !FalseMBB->succ_empty());
// TODO: Simplify
- if (trueBlk->succ_size() == 1 && falseBlk->succ_size() == 1
- && *trueBlk->succ_begin() == *falseBlk->succ_begin()) {
- landBlk = *trueBlk->succ_begin();
- } else if (trueBlk->succ_size() == 0 && falseBlk->succ_size() == 0) {
- landBlk = NULL;
- } else if (trueBlk->succ_size() == 1 && *trueBlk->succ_begin() == falseBlk) {
- landBlk = falseBlk;
- falseBlk = NULL;
- } else if (falseBlk->succ_size() == 1
- && *falseBlk->succ_begin() == trueBlk) {
- landBlk = trueBlk;
- trueBlk = NULL;
- } else if (falseBlk->succ_size() == 1
- && isSameloopDetachedContbreak(trueBlk, falseBlk)) {
- landBlk = *falseBlk->succ_begin();
- } else if (trueBlk->succ_size() == 1
- && isSameloopDetachedContbreak(falseBlk, trueBlk)) {
- landBlk = *trueBlk->succ_begin();
+ if (TrueMBB->succ_size() == 1 && FalseMBB->succ_size() == 1
+ && *TrueMBB->succ_begin() == *FalseMBB->succ_begin()) {
+ // Diamond pattern
+ LandBlk = *TrueMBB->succ_begin();
+ } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) {
+ // Triangle pattern, false is empty
+ LandBlk = FalseMBB;
+ FalseMBB = NULL;
+ } else if (FalseMBB->succ_size() == 1
+ && *FalseMBB->succ_begin() == TrueMBB) {
+ // Triangle pattern, true is empty
+    // We reverse the predicate to make a triangle, empty-false pattern.
+ std::swap(TrueMBB, FalseMBB);
+ reversePredicateSetter(MBB->end());
+ LandBlk = FalseMBB;
+ FalseMBB = NULL;
+ } else if (FalseMBB->succ_size() == 1
+ && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) {
+ LandBlk = *FalseMBB->succ_begin();
+ } else if (TrueMBB->succ_size() == 1
+ && isSameloopDetachedContbreak(FalseMBB, TrueMBB)) {
+ LandBlk = *TrueMBB->succ_begin();
} else {
- return handleJumpintoIf(curBlk, trueBlk, falseBlk);
+ return NumMatch + handleJumpintoIf(MBB, TrueMBB, FalseMBB);
}
   // improveSimpleJumpintoIf can handle the case where landBlk == NULL, but
   // the new BB created for landBlk == NULL may introduce a new challenge to
   // the reduction process.
- if (landBlk != NULL &&
- ((trueBlk && trueBlk->pred_size() > 1)
- || (falseBlk && falseBlk->pred_size() > 1))) {
- cloned += improveSimpleJumpintoIf(curBlk, trueBlk, falseBlk, &landBlk);
+ if (LandBlk &&
+ ((TrueMBB && TrueMBB->pred_size() > 1)
+ || (FalseMBB && FalseMBB->pred_size() > 1))) {
+ Cloned += improveSimpleJumpintoIf(MBB, TrueMBB, FalseMBB, &LandBlk);
}
- if (trueBlk && trueBlk->pred_size() > 1) {
- trueBlk = cloneBlockForPredecessor(trueBlk, curBlk);
- ++cloned;
+ if (TrueMBB && TrueMBB->pred_size() > 1) {
+ TrueMBB = cloneBlockForPredecessor(TrueMBB, MBB);
+ ++Cloned;
}
- if (falseBlk && falseBlk->pred_size() > 1) {
- falseBlk = cloneBlockForPredecessor(falseBlk, curBlk);
- ++cloned;
+ if (FalseMBB && FalseMBB->pred_size() > 1) {
+ FalseMBB = cloneBlockForPredecessor(FalseMBB, MBB);
+ ++Cloned;
}
- mergeIfthenelseBlock(branchInstr, curBlk, trueBlk, falseBlk, landBlk);
+ mergeIfthenelseBlock(BranchMI, MBB, TrueMBB, FalseMBB, LandBlk);
++numIfPatternMatch;
- numClonedBlock += cloned;
-
- return 1 + cloned;
-} //ifPatternMatch
+ numClonedBlock += Cloned;
-template<class PassT>
-int CFGStructurizer<PassT>::switchPatternMatch(BlockT *curBlk) {
- return 0;
-} //switchPatternMatch
+ return 1 + Cloned + NumMatch;
+}
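+
+// The shapes reduced above, as a sketch:
+//
+//   Diamond:               Triangle (false side empty):
+//       MBB                      MBB
+//      /   \                    /   \
+//   True   False             True    |
+//      \   /                    \    |
+//       Land                    Land (== FalseMBB)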
-template<class PassT>
-int CFGStructurizer<PassT>::loopendPatternMatch(BlockT *curBlk) {
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- typename std::vector<LoopT *> nestedLoops;
- while (loopRep) {
- nestedLoops.push_back(loopRep);
- loopRep = loopRep->getParentLoop();
+int AMDGPUCFGStructurizer::loopendPatternMatch() {
+ std::vector<MachineLoop *> NestedLoops;
+ for (MachineLoopInfo::iterator It = MLI->begin(), E = MLI->end();
+ It != E; ++It) {
+ df_iterator<MachineLoop *> LpIt = df_begin(*It),
+ LpE = df_end(*It);
+ for (; LpIt != LpE; ++LpIt)
+ NestedLoops.push_back(*LpIt);
}
-
- if (nestedLoops.size() == 0) {
+ if (NestedLoops.size() == 0)
return 0;
- }
   // Process nested loops outside->inside, so a "continue" to an outside loop
   // won't be mistaken as a "break" of the current loop.
- int num = 0;
- for (typename std::vector<LoopT *>::reverse_iterator
- iter = nestedLoops.rbegin(), iterEnd = nestedLoops.rend();
- iter != iterEnd; ++iter) {
- loopRep = *iter;
-
- if (getLoopLandBlock(loopRep) != NULL) {
+ int Num = 0;
+ for (std::vector<MachineLoop *>::reverse_iterator It = NestedLoops.rbegin(),
+ E = NestedLoops.rend(); It != E; ++It) {
+ MachineLoop *ExaminedLoop = *It;
+ if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop])
continue;
- }
-
- BlockT *loopHeader = loopRep->getHeader();
-
- int numBreak = loopbreakPatternMatch(loopRep, loopHeader);
-
- if (numBreak == -1) {
+ DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
+ int NumBreak = mergeLoop(ExaminedLoop);
+ if (NumBreak == -1)
break;
- }
-
- int numCont = loopcontPatternMatch(loopRep, loopHeader);
- num += numBreak + numCont;
- }
-
- return num;
-} //loopendPatternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::loopPatternMatch(BlockT *curBlk) {
- if (curBlk->succ_size() != 0) {
- return 0;
- }
-
- int numLoop = 0;
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- while (loopRep && loopRep->getHeader() == curBlk) {
- LoopLandInfo *loopLand = getLoopLandInfo(loopRep);
- if (loopLand) {
- BlockT *landBlk = loopLand->landBlk;
- assert(landBlk);
- if (!isRetiredBlock(landBlk)) {
- mergeLooplandBlock(curBlk, loopLand);
- ++numLoop;
- }
- }
- loopRep = loopRep->getParentLoop();
- }
-
- numLoopPatternMatch += numLoop;
-
- return numLoop;
-} //loopPatternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::loopbreakPatternMatch(LoopT *loopRep,
- BlockT *loopHeader) {
- BlockTSmallerVector exitingBlks;
- loopRep->getExitingBlocks(exitingBlks);
-
- if (DEBUGME) {
- errs() << "Loop has " << exitingBlks.size() << " exiting blocks\n";
- }
-
- if (exitingBlks.size() == 0) {
- setLoopLandBlock(loopRep);
- return 0;
- }
-
- // Compute the corresponding exitBlks and exit block set.
- BlockTSmallerVector exitBlks;
- std::set<BlockT *> exitBlkSet;
- for (typename BlockTSmallerVector::const_iterator iter = exitingBlks.begin(),
- iterEnd = exitingBlks.end(); iter != iterEnd; ++iter) {
- BlockT *exitingBlk = *iter;
- BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk);
- exitBlks.push_back(exitBlk);
- exitBlkSet.insert(exitBlk); //non-duplicate insert
- }
-
- assert(exitBlkSet.size() > 0);
- assert(exitBlks.size() == exitingBlks.size());
-
- if (DEBUGME) {
- errs() << "Loop has " << exitBlkSet.size() << " exit blocks\n";
+ Num += NumBreak;
}
+ return Num;
+}
- // Find exitLandBlk.
- BlockT *exitLandBlk = NULL;
- int numCloned = 0;
- int numSerial = 0;
-
- if (exitBlkSet.size() == 1) {
- exitLandBlk = *exitBlkSet.begin();
- } else {
- exitLandBlk = findNearestCommonPostDom(exitBlkSet);
-
- if (exitLandBlk == NULL) {
- return -1;
- }
-
- bool allInPath = true;
- bool allNotInPath = true;
- for (typename std::set<BlockT*>::const_iterator
- iter = exitBlkSet.begin(),
- iterEnd = exitBlkSet.end();
- iter != iterEnd; ++iter) {
- BlockT *exitBlk = *iter;
-
- PathToKind pathKind = singlePathTo(exitBlk, exitLandBlk, true);
- if (DEBUGME) {
- errs() << "BB" << exitBlk->getNumber()
- << " to BB" << exitLandBlk->getNumber() << " PathToKind="
- << pathKind << "\n";
- }
-
- allInPath = allInPath && (pathKind == SinglePath_InPath);
- allNotInPath = allNotInPath && (pathKind == SinglePath_NotInPath);
-
- if (!allInPath && !allNotInPath) {
- if (DEBUGME) {
- errs() << "singlePath check fail\n";
- }
- return -1;
- }
- } // check all exit blocks
-
- if (allNotInPath) {
-
- // TODO: Simplify, maybe separate function?
- LoopT *parentLoopRep = loopRep->getParentLoop();
- BlockT *parentLoopHeader = NULL;
- if (parentLoopRep)
- parentLoopHeader = parentLoopRep->getHeader();
-
- if (exitLandBlk == parentLoopHeader &&
- (exitLandBlk = relocateLoopcontBlock(parentLoopRep,
- loopRep,
- exitBlkSet,
- exitLandBlk)) != NULL) {
- if (DEBUGME) {
- errs() << "relocateLoopcontBlock success\n";
- }
- } else if ((exitLandBlk = addLoopEndbranchBlock(loopRep,
- exitingBlks,
- exitBlks)) != NULL) {
- if (DEBUGME) {
- errs() << "insertEndbranchBlock success\n";
- }
- } else {
- if (DEBUGME) {
- errs() << "loop exit fail\n";
- }
- return -1;
- }
- }
-
- // Handle side entry to exit path.
- exitBlks.clear();
- exitBlkSet.clear();
- for (typename BlockTSmallerVector::iterator iterExiting =
- exitingBlks.begin(),
- iterExitingEnd = exitingBlks.end();
- iterExiting != iterExitingEnd; ++iterExiting) {
- BlockT *exitingBlk = *iterExiting;
- BlockT *exitBlk = exitingBlock2ExitBlock(loopRep, exitingBlk);
- BlockT *newExitBlk = exitBlk;
-
- if (exitBlk != exitLandBlk && exitBlk->pred_size() > 1) {
- newExitBlk = cloneBlockForPredecessor(exitBlk, exitingBlk);
- ++numCloned;
- }
-
- numCloned += cloneOnSideEntryTo(exitingBlk, newExitBlk, exitLandBlk);
-
- exitBlks.push_back(newExitBlk);
- exitBlkSet.insert(newExitBlk);
- }
-
- for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(),
- iterExitEnd = exitBlks.end();
- iterExit != iterExitEnd; ++iterExit) {
- BlockT *exitBlk = *iterExit;
- numSerial += serialPatternMatch(exitBlk);
- }
-
- for (typename BlockTSmallerVector::iterator iterExit = exitBlks.begin(),
- iterExitEnd = exitBlks.end();
- iterExit != iterExitEnd; ++iterExit) {
- BlockT *exitBlk = *iterExit;
- if (exitBlk->pred_size() > 1) {
- if (exitBlk != exitLandBlk) {
- return -1;
- }
- } else {
- if (exitBlk != exitLandBlk &&
- (exitBlk->succ_size() != 1 ||
- *exitBlk->succ_begin() != exitLandBlk)) {
- return -1;
- }
- }
- }
- } // else
-
- exitLandBlk = recordLoopLandBlock(loopRep, exitLandBlk, exitBlks, exitBlkSet);
-
- // Fold break into the breaking block. Leverage across level breaks.
- assert(exitingBlks.size() == exitBlks.size());
- for (typename BlockTSmallerVector::const_iterator iterExit = exitBlks.begin(),
- iterExiting = exitingBlks.begin(), iterExitEnd = exitBlks.end();
- iterExit != iterExitEnd; ++iterExit, ++iterExiting) {
- BlockT *exitBlk = *iterExit;
- BlockT *exitingBlk = *iterExiting;
- assert(exitBlk->pred_size() == 1 || exitBlk == exitLandBlk);
- LoopT *exitingLoop = loopInfo->getLoopFor(exitingBlk);
- handleLoopbreak(exitingBlk, exitingLoop, exitBlk, loopRep, exitLandBlk);
- }
+int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
+ MachineBasicBlock *LoopHeader = LoopRep->getHeader();
+ MBBVector ExitingMBBs;
+ LoopRep->getExitingBlocks(ExitingMBBs);
+ assert(!ExitingMBBs.empty() && "Infinite Loop not supported");
+ DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";);
+ // We assume a single ExitBlk
+ MBBVector ExitBlks;
+ LoopRep->getExitBlocks(ExitBlks);
+ SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
+ for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i)
+ ExitBlkSet.insert(ExitBlks[i]);
+ assert(ExitBlkSet.size() == 1);
+ MachineBasicBlock *ExitBlk = *ExitBlks.begin();
+ assert(ExitBlk && "Loop has several exit block");
+ MBBVector LatchBlks;
+ typedef GraphTraits<Inverse<MachineBasicBlock*> > InvMBBTraits;
+ InvMBBTraits::ChildIteratorType PI = InvMBBTraits::child_begin(LoopHeader),
+ PE = InvMBBTraits::child_end(LoopHeader);
+ for (; PI != PE; PI++) {
+ if (LoopRep->contains(*PI))
+ LatchBlks.push_back(*PI);
+ }
+
+ for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i)
+ mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk);
+ for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i)
+ settleLoopcontBlock(LatchBlks[i], LoopHeader);
+ int Match = 0;
+ do {
+ Match = 0;
+ Match += serialPatternMatch(LoopHeader);
+ Match += ifPatternMatch(LoopHeader);
+ } while (Match > 0);
+ mergeLooplandBlock(LoopHeader, ExitBlk);
+ MachineLoop *ParentLoop = LoopRep->getParentLoop();
+ if (ParentLoop)
+ MLI->changeLoopFor(LoopHeader, ParentLoop);
+ else
+ MLI->removeBlock(LoopHeader);
+ Visited[LoopRep] = true;
+ return 1;
+}
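+
+// In outline, mergeLoop reduces one natural loop: each exiting block has its
+// branch folded into a structured loop break toward the single exit block,
+// each latch gets a structured continue back to the header, the remaining
+// body is reduced with serial/if matches, and the header/exit pair is then
+// fused into one structured loop region.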
- int numBreak = static_cast<int>(exitingBlks.size());
- numLoopbreakPatternMatch += numBreak;
- numClonedBlock += numCloned;
- return numBreak + numSerial + numCloned;
-} //loopbreakPatternMatch
-
-template<class PassT>
-int CFGStructurizer<PassT>::loopcontPatternMatch(LoopT *loopRep,
- BlockT *loopHeader) {
- int numCont = 0;
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> contBlk;
- for (typename InvBlockGTraits::ChildIteratorType iter =
- InvBlockGTraits::child_begin(loopHeader),
- iterEnd = InvBlockGTraits::child_end(loopHeader);
- iter != iterEnd; ++iter) {
- BlockT *curBlk = *iter;
- if (loopRep->contains(curBlk)) {
- handleLoopcontBlock(curBlk, loopInfo->getLoopFor(curBlk),
- loopHeader, loopRep);
- contBlk.push_back(curBlk);
- ++numCont;
+int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep,
+ MachineBasicBlock *LoopHeader) {
+ int NumCont = 0;
+ SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB;
+ typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM;
+ GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader),
+ E = GTIM::child_end(LoopHeader);
+ for (; It != E; ++It) {
+ MachineBasicBlock *MBB = *It;
+ if (LoopRep->contains(MBB)) {
+ handleLoopcontBlock(MBB, MLI->getLoopFor(MBB),
+ LoopHeader, LoopRep);
+ ContMBB.push_back(MBB);
+ ++NumCont;
}
}
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator
- iter = contBlk.begin(), iterEnd = contBlk.end();
- iter != iterEnd; ++iter) {
- (*iter)->removeSuccessor(loopHeader);
+ for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(),
+ E = ContMBB.end(); It != E; ++It) {
+ (*It)->removeSuccessor(LoopHeader);
}
- numLoopcontPatternMatch += numCont;
+ numLoopcontPatternMatch += NumCont;
- return numCont;
-} //loopcontPatternMatch
+ return NumCont;
+}
-template<class PassT>
-bool CFGStructurizer<PassT>::isSameloopDetachedContbreak(BlockT *src1Blk,
- BlockT *src2Blk) {
- // return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in the
- // same loop with LoopLandInfo without explicitly keeping track of
- // loopContBlks and loopBreakBlks, this is a method to get the information.
- //
- if (src1Blk->succ_size() == 0) {
- LoopT *loopRep = loopInfo->getLoopFor(src1Blk);
- if (loopRep != NULL && loopRep == loopInfo->getLoopFor(src2Blk)) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
- if (theEntry != NULL) {
- if (DEBUGME) {
- errs() << "isLoopContBreakBlock yes src1 = BB"
- << src1Blk->getNumber()
- << " src2 = BB" << src2Blk->getNumber() << "\n";
- }
+bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
+ MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) {
+ if (Src1MBB->succ_size() == 0) {
+ MachineLoop *LoopRep = MLI->getLoopFor(Src1MBB);
+    if (LoopRep && LoopRep == MLI->getLoopFor(Src2MBB)) {
+ MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep];
+ if (TheEntry) {
+ DEBUG(
+ dbgs() << "isLoopContBreakBlock yes src1 = BB"
+ << Src1MBB->getNumber()
+ << " src2 = BB" << Src2MBB->getNumber() << "\n";
+ );
return true;
}
}
}
return false;
-} //isSameloopDetachedContbreak
-
-template<class PassT>
-int CFGStructurizer<PassT>::handleJumpintoIf(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk) {
- int num = handleJumpintoIfImp(headBlk, trueBlk, falseBlk);
- if (num == 0) {
- if (DEBUGME) {
- errs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
- }
- num = handleJumpintoIfImp(headBlk, falseBlk, trueBlk);
- }
- return num;
}
-template<class PassT>
-int CFGStructurizer<PassT>::handleJumpintoIfImp(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk) {
- int num = 0;
- BlockT *downBlk;
-
- //trueBlk could be the common post dominator
- downBlk = trueBlk;
-
- if (DEBUGME) {
- errs() << "handleJumpintoIfImp head = BB" << headBlk->getNumber()
- << " true = BB" << trueBlk->getNumber()
- << ", numSucc=" << trueBlk->succ_size()
- << " false = BB" << falseBlk->getNumber() << "\n";
+int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
+ int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB);
+ if (Num == 0) {
+ DEBUG(
+ dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
+ );
+ Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB);
}
+ return Num;
+}
- while (downBlk) {
- if (DEBUGME) {
- errs() << "check down = BB" << downBlk->getNumber();
- }
-
- if (singlePathTo(falseBlk, downBlk) == SinglePath_InPath) {
- if (DEBUGME) {
- errs() << " working\n";
- }
-
- num += cloneOnSideEntryTo(headBlk, trueBlk, downBlk);
- num += cloneOnSideEntryTo(headBlk, falseBlk, downBlk);
+int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
+ int Num = 0;
+ MachineBasicBlock *DownBlk;
- numClonedBlock += num;
- num += serialPatternMatch(*headBlk->succ_begin());
- num += serialPatternMatch(*(++headBlk->succ_begin()));
- num += ifPatternMatch(headBlk);
- assert(num > 0);
+ //trueBlk could be the common post dominator
+ DownBlk = TrueMBB;
+
+ DEBUG(
+ dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
+ << " true = BB" << TrueMBB->getNumber()
+ << ", numSucc=" << TrueMBB->succ_size()
+ << " false = BB" << FalseMBB->getNumber() << "\n";
+ );
+
+ while (DownBlk) {
+ DEBUG(
+ dbgs() << "check down = BB" << DownBlk->getNumber();
+ );
+
+ if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) {
+ DEBUG(
+ dbgs() << " working\n";
+ );
+
+ Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk);
+ Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk);
+
+ numClonedBlock += Num;
+ Num += serialPatternMatch(*HeadMBB->succ_begin());
+ Num += serialPatternMatch(*llvm::next(HeadMBB->succ_begin()));
+ Num += ifPatternMatch(HeadMBB);
+ assert(Num > 0);
break;
}
- if (DEBUGME) {
- errs() << " not working\n";
- }
- downBlk = (downBlk->succ_size() == 1) ? (*downBlk->succ_begin()) : NULL;
+ DEBUG(
+ dbgs() << " not working\n";
+ );
+ DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : NULL;
} // walk down the postDomTree
- return num;
-} //handleJumpintoIf
-
-template<class PassT>
-void CFGStructurizer<PassT>::showImproveSimpleJumpintoIf(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk,
- BlockT *landBlk,
- bool detail) {
- errs() << "head = BB" << headBlk->getNumber()
- << " size = " << headBlk->size();
- if (detail) {
- errs() << "\n";
- headBlk->print(errs());
- errs() << "\n";
+ return Num;
+}
+
+void AMDGPUCFGStructurizer::showImproveSimpleJumpintoIf(
+ MachineBasicBlock *HeadMBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB, bool Detail) {
+ dbgs() << "head = BB" << HeadMBB->getNumber()
+ << " size = " << HeadMBB->size();
+ if (Detail) {
+ dbgs() << "\n";
+ HeadMBB->print(dbgs());
+ dbgs() << "\n";
}
- if (trueBlk) {
- errs() << ", true = BB" << trueBlk->getNumber() << " size = "
- << trueBlk->size() << " numPred = " << trueBlk->pred_size();
- if (detail) {
- errs() << "\n";
- trueBlk->print(errs());
- errs() << "\n";
+ if (TrueMBB) {
+ dbgs() << ", true = BB" << TrueMBB->getNumber() << " size = "
+ << TrueMBB->size() << " numPred = " << TrueMBB->pred_size();
+ if (Detail) {
+ dbgs() << "\n";
+ TrueMBB->print(dbgs());
+ dbgs() << "\n";
}
}
- if (falseBlk) {
- errs() << ", false = BB" << falseBlk->getNumber() << " size = "
- << falseBlk->size() << " numPred = " << falseBlk->pred_size();
- if (detail) {
- errs() << "\n";
- falseBlk->print(errs());
- errs() << "\n";
+ if (FalseMBB) {
+ dbgs() << ", false = BB" << FalseMBB->getNumber() << " size = "
+ << FalseMBB->size() << " numPred = " << FalseMBB->pred_size();
+ if (Detail) {
+ dbgs() << "\n";
+ FalseMBB->print(dbgs());
+ dbgs() << "\n";
}
}
- if (landBlk) {
- errs() << ", land = BB" << landBlk->getNumber() << " size = "
- << landBlk->size() << " numPred = " << landBlk->pred_size();
- if (detail) {
- errs() << "\n";
- landBlk->print(errs());
- errs() << "\n";
+ if (LandMBB) {
+ dbgs() << ", land = BB" << LandMBB->getNumber() << " size = "
+ << LandMBB->size() << " numPred = " << LandMBB->pred_size();
+ if (Detail) {
+ dbgs() << "\n";
+ LandMBB->print(dbgs());
+ dbgs() << "\n";
}
}
- errs() << "\n";
-} //showImproveSimpleJumpintoIf
+ dbgs() << "\n";
+}
-template<class PassT>
-int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk,
- BlockT *trueBlk,
- BlockT *falseBlk,
- BlockT **plandBlk) {
- bool migrateTrue = false;
- bool migrateFalse = false;
+int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
+ MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
+ MachineBasicBlock **LandMBBPtr) {
+ bool MigrateTrue = false;
+ bool MigrateFalse = false;
- BlockT *landBlk = *plandBlk;
+ MachineBasicBlock *LandBlk = *LandMBBPtr;
- assert((trueBlk == NULL || trueBlk->succ_size() <= 1)
- && (falseBlk == NULL || falseBlk->succ_size() <= 1));
+ assert((!TrueMBB || TrueMBB->succ_size() <= 1)
+ && (!FalseMBB || FalseMBB->succ_size() <= 1));
- if (trueBlk == falseBlk) {
+ if (TrueMBB == FalseMBB)
return 0;
- }
- migrateTrue = needMigrateBlock(trueBlk);
- migrateFalse = needMigrateBlock(falseBlk);
+ MigrateTrue = needMigrateBlock(TrueMBB);
+ MigrateFalse = needMigrateBlock(FalseMBB);
- if (!migrateTrue && !migrateFalse) {
+ if (!MigrateTrue && !MigrateFalse)
return 0;
- }
// If we need to migrate either trueBlk or falseBlk, also migrate any block
// that has more than one predecessor. Without doing this, a predecessor
// other than headBlk could leave an undefined value in initReg.
- if (!migrateTrue && trueBlk && trueBlk->pred_size() > 1) {
- migrateTrue = true;
- }
- if (!migrateFalse && falseBlk && falseBlk->pred_size() > 1) {
- migrateFalse = true;
- }
+ if (!MigrateTrue && TrueMBB && TrueMBB->pred_size() > 1)
+ MigrateTrue = true;
+ if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1)
+ MigrateFalse = true;
- if (DEBUGME) {
- errs() << "before improveSimpleJumpintoIf: ";
- showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
- }
+ DEBUG(
+ dbgs() << "before improveSimpleJumpintoIf: ";
+ showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
+ );
// org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
//
@@ -1183,205 +1336,192 @@ int CFGStructurizer<PassT>::improveSimpleJumpintoIf(BlockT *headBlk,
// add initReg = initVal to headBlk
const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
- unsigned initReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
- if (!migrateTrue || !migrateFalse) {
- int initVal = migrateTrue ? 0 : 1;
- CFGTraits::insertAssignInstrBefore(headBlk, passRep, initReg, initVal);
+ if (!MigrateTrue || !MigrateFalse) {
+ // XXX: We have an opportunity here to optimize the "branch into if" case.
+ // Branch into if looks like this:
+ // entry
+ // / |
+ // diamond_head branch_from
+ // / \ |
+ // diamond_false diamond_true
+ // \ /
+ // done
+ //
+ // The diamond_head block begins the "if" and the diamond_true block
+ // is the block being "branched into".
+ //
+ // If MigrateTrue is true, then TrueBB is the block being "branched into"
+ // and if MigrateFalse is true, then FalseBB is the block being
+ // "branched into".
+ //
+ // Here is the pseudo code for how I think the optimization should work:
+ // 1. Insert MOV GPR0, 0 before the branch instruction in diamond_head.
+ // 2. Insert MOV GPR0, 1 before the branch instruction in branch_from.
+ // 3. Move the branch instruction from diamond_head into its own basic
+ // block (new_block).
+ // 4. Add an unconditional branch from diamond_head to new_block
+ // 5. Replace the branch instruction in branch_from with an unconditional
+ // branch to new_block. If branch_from has multiple predecessors, then
+ // we need to replace the True/False block in the branch
+ // instruction instead of replacing it.
+ // 6. Change the condition of the branch instruction in new_block from
+ // COND to (COND || GPR0)
+ //
+ // In order to insert these MOV instructions, we will need to use the
+ // RegisterScavenger. Usually liveness stops being tracked during
+ // the late machine optimization passes, however if we implement
+ // bool TargetRegisterInfo::requiresRegisterScavenging(
+ // const MachineFunction &MF)
+ // and have it return true, liveness will be tracked correctly
+ // by generic optimization passes. We will also need to make sure that
+ // all of our target-specific passes that run after regalloc and before
+ // the CFGStructurizer track liveness and we will need to modify this pass
+ // to correctly track liveness.
+ //
+ // After the above changes, the new CFG should look like this:
+ // entry
+ // / |
+ // diamond_head branch_from
+ // \ /
+ // new_block
+ // / |
+ // diamond_false diamond_true
+ // \ /
+ // done
+ //
+ // Without this optimization, we are forced to duplicate the diamond_true
+ // block and we will end up with a CFG like this:
+ //
+ // entry
+ // / |
+ // diamond_head branch_from
+ // / \ |
+ // diamond_false diamond_true diamond_true (duplicate)
+ // \ / |
+ // done --------------------|
+ //
+ // Duplicating diamond_true can be very costly especially if it has a
+ // lot of instructions.
+ return 0;
}
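
The CFG rewiring sketched in the XXX comment above can be modeled compactly
outside LLVM. A minimal sketch, assuming a toy Block type in place of the real
MachineBasicBlock API (splitBranchIntoNewBlock and its field names are
hypothetical):

    #include <string>
    #include <vector>

    // Toy CFG node standing in for MachineBasicBlock in this sketch.
    struct Block {
      std::string Name;
      std::vector<Block *> Succs;
    };

    // Steps 3-5 from the comment above: split the conditional branch out
    // of diamond_head into its own block and retarget branch_from at it.
    Block *splitBranchIntoNewBlock(Block &DiamondHead, Block &BranchFrom) {
      Block *NewBlock = new Block{"new_block", DiamondHead.Succs};
      DiamondHead.Succs = {NewBlock}; // step 4: unconditional fall-through
      BranchFrom.Succs = {NewBlock};  // step 5: redirect branch_from
      return NewBlock; // step 6 (COND || GPR0) is not modeled here
    }
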
- int numNewBlk = 0;
-
- if (landBlk == NULL) {
- landBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(landBlk); //insert to function
-
- if (trueBlk) {
- trueBlk->addSuccessor(landBlk);
- } else {
- headBlk->addSuccessor(landBlk);
- }
-
- if (falseBlk) {
- falseBlk->addSuccessor(landBlk);
- } else {
- headBlk->addSuccessor(landBlk);
- }
-
- numNewBlk ++;
- }
+ int NumNewBlk = 0;
- bool landBlkHasOtherPred = (landBlk->pred_size() > 2);
+ bool LandBlkHasOtherPred = (LandBlk->pred_size() > 2);
//insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL"
- typename BlockT::iterator insertPos =
- CFGTraits::getInstrPos
- (landBlk, CFGTraits::insertInstrBefore(landBlk, AMDGPU::ENDIF, passRep));
-
- if (landBlkHasOtherPred) {
- unsigned immReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
- CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 2);
- unsigned cmpResReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
-
- CFGTraits::insertCompareInstrBefore(landBlk, insertPos, passRep, cmpResReg,
- initReg, immReg);
- CFGTraits::insertCondBranchBefore(landBlk, insertPos,
- AMDGPU::IF_PREDICATE_SET, passRep,
- cmpResReg, DebugLoc());
- }
-
- CFGTraits::insertCondBranchBefore(landBlk, insertPos, AMDGPU::IF_PREDICATE_SET,
- passRep, initReg, DebugLoc());
-
- if (migrateTrue) {
- migrateInstruction(trueBlk, landBlk, insertPos);
+ MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF);
+
+ if (LandBlkHasOtherPred) {
+ llvm_unreachable("Extra register needed to handle CFG");
+ unsigned CmpResReg =
+ HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
+ llvm_unreachable("Extra compare instruction needed to handle CFG");
+ insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET,
+ CmpResReg, DebugLoc());
+ }
+
+ // XXX: We are running this after RA, so creating virtual registers will
+ // cause an assertion failure in the PostRA scheduling pass.
+ unsigned InitReg =
+ HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
+ insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, InitReg,
+ DebugLoc());
+
+ if (MigrateTrue) {
+ migrateInstruction(TrueMBB, LandBlk, I);
// need to unconditionally insert the assignment to ensure a path from a
// predecessor other than headBlk has a valid value in initReg if
// (initVal != 1).
- CFGTraits::insertAssignInstrBefore(trueBlk, passRep, initReg, 1);
+ llvm_unreachable("Extra register needed to handle CFG");
}
- CFGTraits::insertInstrBefore(insertPos, AMDGPU::ELSE, passRep);
+ insertInstrBefore(I, AMDGPU::ELSE);
- if (migrateFalse) {
- migrateInstruction(falseBlk, landBlk, insertPos);
+ if (MigrateFalse) {
+ migrateInstruction(FalseMBB, LandBlk, I);
// need to unconditionally insert the assignment to ensure a path from a
// predecessor other than headBlk has a valid value in initReg if
// (initVal != 0)
- CFGTraits::insertAssignInstrBefore(falseBlk, passRep, initReg, 0);
+ llvm_unreachable("Extra register needed to handle CFG");
}
- if (landBlkHasOtherPred) {
+ if (LandBlkHasOtherPred) {
// add endif
- CFGTraits::insertInstrBefore(insertPos, AMDGPU::ENDIF, passRep);
+ insertInstrBefore(I, AMDGPU::ENDIF);
// put initReg = 2 to other predecessors of landBlk
- for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
- predIterEnd = landBlk->pred_end(); predIter != predIterEnd;
- ++predIter) {
- BlockT *curBlk = *predIter;
- if (curBlk != trueBlk && curBlk != falseBlk) {
- CFGTraits::insertAssignInstrBefore(curBlk, passRep, initReg, 2);
- }
- } //for
- }
- if (DEBUGME) {
- errs() << "result from improveSimpleJumpintoIf: ";
- showImproveSimpleJumpintoIf(headBlk, trueBlk, falseBlk, landBlk, 0);
- }
-
- // update landBlk
- *plandBlk = landBlk;
-
- return numNewBlk;
-} //improveSimpleJumpintoIf
-
-template<class PassT>
-void CFGStructurizer<PassT>::handleLoopbreak(BlockT *exitingBlk,
- LoopT *exitingLoop,
- BlockT *exitBlk,
- LoopT *exitLoop,
- BlockT *landBlk) {
- if (DEBUGME) {
- errs() << "Trying to break loop-depth = " << getLoopDepth(exitLoop)
- << " from loop-depth = " << getLoopDepth(exitingLoop) << "\n";
- }
- const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
-
- RegiT initReg = INVALIDREGNUM;
- if (exitingLoop != exitLoop) {
- initReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- assert(initReg != INVALIDREGNUM);
- addLoopBreakInitReg(exitLoop, initReg);
- while (exitingLoop != exitLoop && exitingLoop) {
- addLoopBreakOnReg(exitingLoop, initReg);
- exitingLoop = exitingLoop->getParentLoop();
+ for (MachineBasicBlock::pred_iterator PI = LandBlk->pred_begin(),
+ PE = LandBlk->pred_end(); PI != PE; ++PI) {
+ MachineBasicBlock *MBB = *PI;
+ if (MBB != TrueMBB && MBB != FalseMBB)
+ llvm_unreachable("Extra register needed to handle CFG");
}
- assert(exitingLoop == exitLoop);
}
+ DEBUG(
+ dbgs() << "result from improveSimpleJumpintoIf: ";
+ showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
+ );
- mergeLoopbreakBlock(exitingBlk, exitBlk, landBlk, initReg);
+ // update landBlk
+ *LandMBBPtr = LandBlk;
-} //handleLoopbreak
+ return NumNewBlk;
+}
-template<class PassT>
-void CFGStructurizer<PassT>::handleLoopcontBlock(BlockT *contingBlk,
- LoopT *contingLoop,
- BlockT *contBlk,
- LoopT *contLoop) {
- if (DEBUGME) {
- errs() << "loopcontPattern cont = BB" << contingBlk->getNumber()
- << " header = BB" << contBlk->getNumber() << "\n";
+void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
+ MachineLoop *ContLoop) {
+ DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber()
+ << " header = BB" << ContMBB->getNumber() << "\n";
+ dbgs() << "Trying to continue loop-depth = "
+ << getLoopDepth(ContLoop)
+ << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";);
+ settleLoopcontBlock(ContingMBB, ContMBB);
+}
- errs() << "Trying to continue loop-depth = "
- << getLoopDepth(contLoop)
- << " from loop-depth = " << getLoopDepth(contingLoop) << "\n";
- }
+void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
+ MachineBasicBlock *SrcMBB) {
+ DEBUG(
+ dbgs() << "serialPattern BB" << DstMBB->getNumber()
+ << " <= BB" << SrcMBB->getNumber() << "\n";
+ );
+ DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end());
- RegiT initReg = INVALIDREGNUM;
- const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
- if (contingLoop != contLoop) {
- initReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- assert(initReg != INVALIDREGNUM);
- addLoopContInitReg(contLoop, initReg);
- while (contingLoop && contingLoop->getParentLoop() != contLoop) {
- addLoopBreakOnReg(contingLoop, initReg); //not addLoopContOnReg
- contingLoop = contingLoop->getParentLoop();
- }
- assert(contingLoop && contingLoop->getParentLoop() == contLoop);
- addLoopContOnReg(contingLoop, initReg);
- }
+ DstMBB->removeSuccessor(SrcMBB);
+ cloneSuccessorList(DstMBB, SrcMBB);
- settleLoopcontBlock(contingBlk, contBlk, initReg);
-} //handleLoopcontBlock
+ removeSuccessor(SrcMBB);
+ MLI->removeBlock(SrcMBB);
+ retireBlock(SrcMBB);
+}
-template<class PassT>
-void CFGStructurizer<PassT>::mergeSerialBlock(BlockT *dstBlk, BlockT *srcBlk) {
- if (DEBUGME) {
- errs() << "serialPattern BB" << dstBlk->getNumber()
- << " <= BB" << srcBlk->getNumber() << "\n";
- }
- dstBlk->splice(dstBlk->end(), srcBlk, srcBlk->begin(), srcBlk->end());
-
- dstBlk->removeSuccessor(srcBlk);
- CFGTraits::cloneSuccessorList(dstBlk, srcBlk);
-
- removeSuccessor(srcBlk);
- retireBlock(dstBlk, srcBlk);
-} //mergeSerialBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr,
- BlockT *curBlk,
- BlockT *trueBlk,
- BlockT *falseBlk,
- BlockT *landBlk) {
- if (DEBUGME) {
- errs() << "ifPattern BB" << curBlk->getNumber();
- errs() << "{ ";
- if (trueBlk) {
- errs() << "BB" << trueBlk->getNumber();
- }
- errs() << " } else ";
- errs() << "{ ";
- if (falseBlk) {
- errs() << "BB" << falseBlk->getNumber();
- }
- errs() << " }\n ";
- errs() << "landBlock: ";
- if (landBlk == NULL) {
- errs() << "NULL";
+void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
+ MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) {
+ assert(TrueMBB);
+ DEBUG(
+ dbgs() << "ifPattern BB" << MBB->getNumber();
+ dbgs() << "{ ";
+ if (TrueMBB) {
+ dbgs() << "BB" << TrueMBB->getNumber();
+ }
+ dbgs() << " } else ";
+ dbgs() << "{ ";
+ if (FalseMBB) {
+ dbgs() << "BB" << FalseMBB->getNumber();
+ }
+ dbgs() << " }\n ";
+ dbgs() << "landBlock: ";
+ if (!LandMBB) {
+ dbgs() << "NULL";
} else {
- errs() << "BB" << landBlk->getNumber();
+ dbgs() << "BB" << LandMBB->getNumber();
}
- errs() << "\n";
- }
+ dbgs() << "\n";
+ );
- int oldOpcode = branchInstr->getOpcode();
- DebugLoc branchDL = branchInstr->getDebugLoc();
+ int OldOpcode = BranchMI->getOpcode();
+ DebugLoc BranchDL = BranchMI->getDebugLoc();
// transform to
// if cond
@@ -1391,1661 +1531,374 @@ void CFGStructurizer<PassT>::mergeIfthenelseBlock(InstrT *branchInstr,
// endif
// landBlk
- typename BlockT::iterator branchInstrPos =
- CFGTraits::getInstrPos(curBlk, branchInstr);
- CFGTraits::insertCondBranchBefore(branchInstrPos,
- CFGTraits::getBranchNzeroOpcode(oldOpcode),
- passRep,
- branchDL);
-
- if (trueBlk) {
- curBlk->splice(branchInstrPos, trueBlk, trueBlk->begin(), trueBlk->end());
- curBlk->removeSuccessor(trueBlk);
- if (landBlk && trueBlk->succ_size()!=0) {
- trueBlk->removeSuccessor(landBlk);
- }
- retireBlock(curBlk, trueBlk);
- }
- CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ELSE, passRep);
-
- if (falseBlk) {
- curBlk->splice(branchInstrPos, falseBlk, falseBlk->begin(),
- falseBlk->end());
- curBlk->removeSuccessor(falseBlk);
- if (landBlk && falseBlk->succ_size() != 0) {
- falseBlk->removeSuccessor(landBlk);
- }
- retireBlock(curBlk, falseBlk);
- }
- CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::ENDIF, passRep);
-
- branchInstr->eraseFromParent();
+ MachineBasicBlock::iterator I = BranchMI;
+ insertCondBranchBefore(I, getBranchNzeroOpcode(OldOpcode),
+ BranchDL);
- if (landBlk && trueBlk && falseBlk) {
- curBlk->addSuccessor(landBlk);
+ if (TrueMBB) {
+ MBB->splice(I, TrueMBB, TrueMBB->begin(), TrueMBB->end());
+ MBB->removeSuccessor(TrueMBB);
+ if (LandMBB && TrueMBB->succ_size()!=0)
+ TrueMBB->removeSuccessor(LandMBB);
+ retireBlock(TrueMBB);
+ MLI->removeBlock(TrueMBB);
}
-} //mergeIfthenelseBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::mergeLooplandBlock(BlockT *dstBlk,
- LoopLandInfo *loopLand) {
- BlockT *landBlk = loopLand->landBlk;
-
- if (DEBUGME) {
- errs() << "loopPattern header = BB" << dstBlk->getNumber()
- << " land = BB" << landBlk->getNumber() << "\n";
+ if (FalseMBB) {
+ insertInstrBefore(I, AMDGPU::ELSE);
+ MBB->splice(I, FalseMBB, FalseMBB->begin(),
+ FalseMBB->end());
+ MBB->removeSuccessor(FalseMBB);
+ if (LandMBB && FalseMBB->succ_size() != 0)
+ FalseMBB->removeSuccessor(LandMBB);
+ retireBlock(FalseMBB);
+ MLI->removeBlock(FalseMBB);
}
+ insertInstrBefore(I, AMDGPU::ENDIF);
- // Loop contInitRegs are init at the beginning of the loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->contInitRegs.begin(),
- iterEnd = loopLand->contInitRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
- }
+ BranchMI->eraseFromParent();
- /* We last inserted the DebugLoc in the
- * BREAK_LOGICALZ_i32 or AMDGPU::BREAK_LOGICALNZ statement in the current dstBlk.
- * Search for the DebugLoc in that statement.
- * If not found, we have to insert the empty/default DebugLoc. */
- InstrT *loopBreakInstr = CFGTraits::getLoopBreakInstr(dstBlk);
- DebugLoc DLBreak = (loopBreakInstr) ? loopBreakInstr->getDebugLoc() : DebugLoc();
-
- CFGTraits::insertInstrBefore(dstBlk, AMDGPU::WHILELOOP, passRep, DLBreak);
- // Loop breakInitRegs are init before entering the loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->breakInitRegs.begin(),
- iterEnd = loopLand->breakInitRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
- }
- // Loop endbranchInitRegs are init before entering the loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->endbranchInitRegs.begin(),
- iterEnd = loopLand->endbranchInitRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertAssignInstrBefore(dstBlk, passRep, *iter, 0);
- }
+ if (LandMBB && TrueMBB && FalseMBB)
+ MBB->addSuccessor(LandMBB);
- /* We last inserted the DebugLoc in the continue statement in the current dstBlk.
- * Search for the DebugLoc in the continue statement.
- * If not found, we have to insert the empty/default DebugLoc. */
- InstrT *continueInstr = CFGTraits::getContinueInstr(dstBlk);
- DebugLoc DLContinue = (continueInstr) ? continueInstr->getDebugLoc() : DebugLoc();
-
- CFGTraits::insertInstrEnd(dstBlk, AMDGPU::ENDLOOP, passRep, DLContinue);
- // Loop breakOnRegs are check after the ENDLOOP: break the loop outside this
- // loop.
- for (typename std::set<RegiT>::const_iterator iter =
- loopLand->breakOnRegs.begin(),
- iterEnd = loopLand->breakOnRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::PREDICATED_BREAK, passRep,
- *iter);
- }
-
- // Loop contOnRegs are check after the ENDLOOP: cont the loop outside this
- // loop.
- for (std::set<RegiT>::const_iterator iter = loopLand->contOnRegs.begin(),
- iterEnd = loopLand->contOnRegs.end(); iter != iterEnd; ++iter) {
- CFGTraits::insertCondBranchEnd(dstBlk, AMDGPU::CONTINUE_LOGICALNZ_i32,
- passRep, *iter);
- }
-
- dstBlk->splice(dstBlk->end(), landBlk, landBlk->begin(), landBlk->end());
-
- for (typename BlockT::succ_iterator iter = landBlk->succ_begin(),
- iterEnd = landBlk->succ_end(); iter != iterEnd; ++iter) {
- dstBlk->addSuccessor(*iter); // *iter's predecessor is also taken care of.
- }
-
- removeSuccessor(landBlk);
- retireBlock(dstBlk, landBlk);
-} //mergeLooplandBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::reversePredicateSetter(typename BlockT::iterator I) {
- while (I--) {
- if (I->getOpcode() == AMDGPU::PRED_X) {
- switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
- case OPCODE_IS_ZERO_INT:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_NOT_ZERO_INT);
- return;
- case OPCODE_IS_NOT_ZERO_INT:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_ZERO_INT);
- return;
- case OPCODE_IS_ZERO:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_NOT_ZERO);
- return;
- case OPCODE_IS_NOT_ZERO:
- static_cast<MachineInstr *>(I)->getOperand(2).setImm(OPCODE_IS_ZERO);
- return;
- default:
- assert(0 && "PRED_X Opcode invalid!");
- }
- }
- }
}
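
The loop above scans backwards for the PRED_X instruction and negates its
condition-code operand. The flip itself is a four-entry involution; a
standalone sketch, with illustrative enumerators rather than the real R600
immediate encodings:

    enum PredCond { IsZero, IsNotZero, IsZeroInt, IsNotZeroInt };

    // Mirror of the switch in reversePredicateSetter: each condition
    // maps to its logical negation.
    PredCond reversePredCond(PredCond C) {
      switch (C) {
      case IsZero:       return IsNotZero;
      case IsNotZero:    return IsZero;
      case IsZeroInt:    return IsNotZeroInt;
      case IsNotZeroInt: return IsZeroInt;
      }
      return C; // unreachable for valid inputs
    }
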
-template<class PassT>
-void CFGStructurizer<PassT>::mergeLoopbreakBlock(BlockT *exitingBlk,
- BlockT *exitBlk,
- BlockT *exitLandBlk,
- RegiT setReg) {
- if (DEBUGME) {
- errs() << "loopbreakPattern exiting = BB" << exitingBlk->getNumber()
- << " exit = BB" << exitBlk->getNumber()
- << " land = BB" << exitLandBlk->getNumber() << "\n";
- }
+void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
+ MachineBasicBlock *LandMBB) {
+ DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
+ << " land = BB" << LandMBB->getNumber() << "\n";);
- InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(exitingBlk);
- assert(branchInstr && CFGTraits::isCondBranch(branchInstr));
-
- DebugLoc DL = branchInstr->getDebugLoc();
-
- BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
-
- // transform exitingBlk to
- // if ( ) {
- // exitBlk (if exitBlk != exitLandBlk)
- // setReg = 1
- // break
- // }endif
- // successor = {orgSuccessor(exitingBlk) - exitBlk}
-
- typename BlockT::iterator branchInstrPos =
- CFGTraits::getInstrPos(exitingBlk, branchInstr);
-
- if (exitBlk == exitLandBlk && setReg == INVALIDREGNUM) {
- //break_logical
+ insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc());
+ insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc());
+ DstBlk->addSuccessor(LandMBB);
+ DstBlk->removeSuccessor(DstBlk);
+}
- if (trueBranch != exitBlk) {
- reversePredicateSetter(branchInstrPos);
- }
- CFGTraits::insertCondBranchBefore(branchInstrPos, AMDGPU::PREDICATED_BREAK, passRep, DL);
- } else {
- if (trueBranch != exitBlk) {
- reversePredicateSetter(branchInstr);
- }
- CFGTraits::insertCondBranchBefore(branchInstrPos, AMDGPU::PREDICATED_BREAK, passRep, DL);
- if (exitBlk != exitLandBlk) {
- //splice is insert-before ...
- exitingBlk->splice(branchInstrPos, exitBlk, exitBlk->begin(),
- exitBlk->end());
- }
- if (setReg != INVALIDREGNUM) {
- CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
- }
- CFGTraits::insertInstrBefore(branchInstrPos, AMDGPU::BREAK, passRep);
- } //if_logical
+void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
+ MachineBasicBlock *LandMBB) {
+ DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber()
+ << " land = BB" << LandMBB->getNumber() << "\n";);
+ MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB);
+ assert(BranchMI && isCondBranch(BranchMI));
+ DebugLoc DL = BranchMI->getDebugLoc();
+ MachineBasicBlock *TrueBranch = getTrueBranch(BranchMI);
+ MachineBasicBlock::iterator I = BranchMI;
+ if (TrueBranch != LandMBB)
+ reversePredicateSetter(I);
+ insertCondBranchBefore(ExitingMBB, I, AMDGPU::IF_PREDICATE_SET,
+     AMDGPU::PREDICATE_BIT, DL);
+ insertInstrBefore(I, AMDGPU::BREAK);
+ insertInstrBefore(I, AMDGPU::ENDIF);
//now the branch instruction can be erased safely
- branchInstr->eraseFromParent();
-
+ BranchMI->eraseFromParent();
//now take care of successors, retire blocks
- exitingBlk->removeSuccessor(exitBlk);
- if (exitBlk != exitLandBlk) {
- //splice is insert-before ...
- exitBlk->removeSuccessor(exitLandBlk);
- retireBlock(exitingBlk, exitBlk);
- }
-
-} //mergeLoopbreakBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::settleLoopcontBlock(BlockT *contingBlk,
- BlockT *contBlk,
- RegiT setReg) {
- if (DEBUGME) {
- errs() << "settleLoopcontBlock conting = BB"
- << contingBlk->getNumber()
- << ", cont = BB" << contBlk->getNumber() << "\n";
- }
-
- InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(contingBlk);
- if (branchInstr) {
- assert(CFGTraits::isCondBranch(branchInstr));
- typename BlockT::iterator branchInstrPos =
- CFGTraits::getInstrPos(contingBlk, branchInstr);
- BlockT *trueBranch = CFGTraits::getTrueBranch(branchInstr);
- int oldOpcode = branchInstr->getOpcode();
- DebugLoc DL = branchInstr->getDebugLoc();
-
- // transform contingBlk to
- // if () {
- // move instr after branchInstr
- // continue
- // or
- // setReg = 1
- // break
- // }endif
- // successor = {orgSuccessor(contingBlk) - loopHeader}
-
- bool useContinueLogical =
- (setReg == INVALIDREGNUM && (&*contingBlk->rbegin()) == branchInstr);
-
- if (useContinueLogical == false) {
- int branchOpcode =
- trueBranch == contBlk ? CFGTraits::getBranchNzeroOpcode(oldOpcode)
- : CFGTraits::getBranchZeroOpcode(oldOpcode);
-
- CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL);
-
- if (setReg != INVALIDREGNUM) {
- CFGTraits::insertAssignInstrBefore(branchInstrPos, passRep, setReg, 1);
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, DL);
- } else {
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, DL);
- }
+ ExitingMBB->removeSuccessor(LandMBB);
+}
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::ENDIF, passRep, DL);
+void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB,
+ MachineBasicBlock *ContMBB) {
+ DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
+ << ContingMBB->getNumber()
+ << ", cont = BB" << ContMBB->getNumber() << "\n";);
+
+ MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB);
+ if (MI) {
+ assert(isCondBranch(MI));
+ MachineBasicBlock::iterator I = MI;
+ MachineBasicBlock *TrueBranch = getTrueBranch(MI);
+ int OldOpcode = MI->getOpcode();
+ DebugLoc DL = MI->getDebugLoc();
+
+ bool UseContinueLogical = ((&*ContingMBB->rbegin()) == MI);
+
+ if (UseContinueLogical == false) {
+ int BranchOpcode =
+ TrueBranch == ContMBB ? getBranchNzeroOpcode(OldOpcode) :
+ getBranchZeroOpcode(OldOpcode);
+ insertCondBranchBefore(I, BranchOpcode, DL);
+ // insertEnd to ensure phi-moves, if any, go before the continue-instr.
+ insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, DL);
+ insertInstrEnd(ContingMBB, AMDGPU::ENDIF, DL);
} else {
- int branchOpcode =
- trueBranch == contBlk ? CFGTraits::getContinueNzeroOpcode(oldOpcode)
- : CFGTraits::getContinueZeroOpcode(oldOpcode);
-
- CFGTraits::insertCondBranchBefore(branchInstrPos, branchOpcode, passRep, DL);
+ int BranchOpcode =
+ TrueBranch == ContMBB ? getContinueNzeroOpcode(OldOpcode) :
+ getContinueZeroOpcode(OldOpcode);
+ insertCondBranchBefore(I, BranchOpcode, DL);
}
- branchInstr->eraseFromParent();
+ MI->eraseFromParent();
} else {
// if we've arrived here then we've already erased the branch instruction
- // travel back up the basic block to see the last reference of our debug location
- // we've just inserted that reference here so it should be representative
- if (setReg != INVALIDREGNUM) {
- CFGTraits::insertAssignInstrBefore(contingBlk, passRep, setReg, 1);
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::BREAK, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
- } else {
- // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
- CFGTraits::insertInstrEnd(contingBlk, AMDGPU::CONTINUE, passRep, CFGTraits::getLastDebugLocInBB(contingBlk));
- }
- } //else
-
-} //settleLoopcontBlock
-
-// BBs in exitBlkSet are determined as in break-path for loopRep,
-// before we can put code for BBs as inside loop-body for loopRep
-// check whether those BBs are determined as cont-BB for parentLoopRep
-// earlier.
-// If so, generate a new BB newBlk
-// (1) set newBlk common successor of BBs in exitBlkSet
-// (2) change the continue-instr in BBs in exitBlkSet to break-instr
-// (3) generate continue-instr in newBlk
-//
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::relocateLoopcontBlock(LoopT *parentLoopRep,
- LoopT *loopRep,
- std::set<BlockT *> &exitBlkSet,
- BlockT *exitLandBlk) {
- std::set<BlockT *> endBlkSet;
-
-
-
- for (typename std::set<BlockT *>::const_iterator iter = exitBlkSet.begin(),
- iterEnd = exitBlkSet.end();
- iter != iterEnd; ++iter) {
- BlockT *exitBlk = *iter;
- BlockT *endBlk = singlePathEnd(exitBlk, exitLandBlk);
-
- if (endBlk == NULL || CFGTraits::getContinueInstr(endBlk) == NULL)
- return NULL;
-
- endBlkSet.insert(endBlk);
- }
-
- BlockT *newBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(newBlk); //insert to function
- CFGTraits::insertInstrEnd(newBlk, AMDGPU::CONTINUE, passRep);
- SHOWNEWBLK(newBlk, "New continue block: ");
-
- for (typename std::set<BlockT*>::const_iterator iter = endBlkSet.begin(),
- iterEnd = endBlkSet.end();
- iter != iterEnd; ++iter) {
- BlockT *endBlk = *iter;
- InstrT *contInstr = CFGTraits::getContinueInstr(endBlk);
- if (contInstr) {
- contInstr->eraseFromParent();
- }
- endBlk->addSuccessor(newBlk);
- if (DEBUGME) {
- errs() << "Add new continue Block to BB"
- << endBlk->getNumber() << " successors\n";
- }
- }
-
- return newBlk;
-} //relocateLoopcontBlock
-
-
-// LoopEndbranchBlock is a BB created by the CFGStructurizer to use as
-// LoopLandBlock. This BB branches on the loop endBranchInit register to the
-// paths corresponding to the loop exiting branches.
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::addLoopEndbranchBlock(LoopT *loopRep,
- BlockTSmallerVector &exitingBlks,
- BlockTSmallerVector &exitBlks) {
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
- const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
-
- RegiT endBranchReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- assert(endBranchReg >= 0);
-
- // reg = 0 before entering the loop
- addLoopEndbranchInitReg(loopRep, endBranchReg);
-
- uint32_t numBlks = static_cast<uint32_t>(exitingBlks.size());
- assert(numBlks >=2 && numBlks == exitBlks.size());
-
- BlockT *preExitingBlk = exitingBlks[0];
- BlockT *preExitBlk = exitBlks[0];
- BlockT *preBranchBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(preBranchBlk); //insert to function
- SHOWNEWBLK(preBranchBlk, "New loopEndbranch block: ");
-
- BlockT *newLandBlk = preBranchBlk;
-
- CFGTraits::replaceInstrUseOfBlockWith(preExitingBlk, preExitBlk,
- newLandBlk);
- preExitingBlk->removeSuccessor(preExitBlk);
- preExitingBlk->addSuccessor(newLandBlk);
-
- //it is redundant to add reg = 0 to exitingBlks[0]
-
- // For the 1st..nth exiting path (the last iteration handles two paths),
- // create the branch to the previous path and the current path.
- for (uint32_t i = 1; i < numBlks; ++i) {
- BlockT *curExitingBlk = exitingBlks[i];
- BlockT *curExitBlk = exitBlks[i];
- BlockT *curBranchBlk;
-
- if (i == numBlks - 1) {
- curBranchBlk = curExitBlk;
- } else {
- curBranchBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(curBranchBlk); //insert to function
- SHOWNEWBLK(curBranchBlk, "New loopEndbranch block: ");
- }
-
- // Add reg = i to exitingBlks[i].
- CFGTraits::insertAssignInstrBefore(curExitingBlk, passRep,
- endBranchReg, i);
-
- // Remove the edge (exitingBlks[i] exitBlks[i]) add new edge
- // (exitingBlks[i], newLandBlk).
- CFGTraits::replaceInstrUseOfBlockWith(curExitingBlk, curExitBlk,
- newLandBlk);
- curExitingBlk->removeSuccessor(curExitBlk);
- curExitingBlk->addSuccessor(newLandBlk);
-
- // add to preBranchBlk the branch instruction:
- // if (endBranchReg == preVal)
- // preExitBlk
- // else
- // curBranchBlk
- //
- // preValReg = i - 1
-
- DebugLoc DL;
- RegiT preValReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
-
- preBranchBlk->insert(preBranchBlk->begin(),
- tii->getMovImmInstr(preBranchBlk->getParent(), preValReg,
- i - 1));
-
- // condResReg = (endBranchReg == preValReg)
- RegiT condResReg = static_cast<int>
- (funcRep->getRegInfo().createVirtualRegister(I32RC));
- BuildMI(preBranchBlk, DL, tii->get(tii->getIEQOpcode()), condResReg)
- .addReg(endBranchReg).addReg(preValReg);
-
- BuildMI(preBranchBlk, DL, tii->get(AMDGPU::BRANCH_COND_i32))
- .addMBB(preExitBlk).addReg(condResReg);
-
- preBranchBlk->addSuccessor(preExitBlk);
- preBranchBlk->addSuccessor(curBranchBlk);
-
- // Update preExitingBlk, preExitBlk, preBranchBlk.
- preExitingBlk = curExitingBlk;
- preExitBlk = curExitBlk;
- preBranchBlk = curBranchBlk;
-
- } //end for 1 .. n blocks
-
- return newLandBlk;
-} //addLoopEndbranchBlock
-
-template<class PassT>
-typename CFGStructurizer<PassT>::PathToKind
-CFGStructurizer<PassT>::singlePathTo(BlockT *srcBlk, BlockT *dstBlk,
- bool allowSideEntry) {
- assert(dstBlk);
-
- if (srcBlk == dstBlk) {
- return SinglePath_InPath;
+ // travel back up the basic block to see the last reference of our debug
+ // location; we've just inserted that reference here, so it should be
+ // representative. Use insertEnd to ensure phi-moves, if any, go before
+ // the continue-instr.
+ insertInstrEnd(ContingMBB, AMDGPU::CONTINUE,
+ getLastDebugLocInBB(ContingMBB));
}
+}
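
settleLoopcontBlock reduces to one decision: if the conditional branch is the
last instruction of the continuing block, it can be fused into a single
CONTINUE_LOGICAL* opcode; otherwise it is rewritten as an if around
CONTINUE/ENDIF so the trailing instructions stay guarded. A rough model under
that reading, with a hypothetical flat opcode list in place of real machine
instructions:

    #include <algorithm>
    #include <vector>

    enum Op { CondBranch, ContinueLogical, IfPred, Continue, EndIf, Other };

    // Toy settleLoopcontBlock: Body is the continuing block's instruction
    // stream and is assumed to contain exactly one CondBranch.
    void lowerLoopContinue(std::vector<Op> &Body) {
      auto Br = std::find(Body.begin(), Body.end(), CondBranch);
      if (Br + 1 == Body.end()) {
        *Br = ContinueLogical; // branch is last: use the fused form
        return;
      }
      *Br = IfPred;             // keep the predicate, drop the target
      Body.push_back(Continue); // phi-moves (if any) stay before this
      Body.push_back(EndIf);
    }
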
- while (srcBlk && srcBlk->succ_size() == 1) {
- srcBlk = *srcBlk->succ_begin();
- if (srcBlk == dstBlk) {
- return SinglePath_InPath;
- }
-
- if (!allowSideEntry && srcBlk->pred_size() > 1) {
- return Not_SinglePath;
- }
- }
-
- if (srcBlk && srcBlk->succ_size()==0) {
- return SinglePath_NotInPath;
- }
-
- return Not_SinglePath;
-} //singlePathTo
-
-// If there is a single path from srcBlk to dstBlk, return the last block
-// before dstBlk. If there is a single path from srcBlk to the end without
-// reaching dstBlk, return the last block in the path. Otherwise, return NULL.
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::singlePathEnd(BlockT *srcBlk, BlockT *dstBlk,
- bool allowSideEntry) {
- assert(dstBlk);
-
- if (srcBlk == dstBlk) {
- return srcBlk;
- }
-
- if (srcBlk->succ_size() == 0) {
- return srcBlk;
- }
-
- while (srcBlk && srcBlk->succ_size() == 1) {
- BlockT *preBlk = srcBlk;
-
- srcBlk = *srcBlk->succ_begin();
- if (srcBlk == NULL) {
- return preBlk;
- }
-
- if (!allowSideEntry && srcBlk->pred_size() > 1) {
- return NULL;
- }
- }
-
- if (srcBlk && srcBlk->succ_size()==0) {
- return srcBlk;
- }
-
- return NULL;
-
-} //singlePathEnd
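
The removed singlePathTo/singlePathEnd pair classifies a unique-successor walk
into three outcomes: the walk reaches dstBlk, dead-ends without it, or stops
being a single path. A compact standalone restatement of singlePathTo over a
toy Block type (all names here are hypothetical):

    #include <vector>

    struct Block {
      std::vector<Block *> Succs, Preds;
    };

    enum PathToKind { NotSinglePath, SinglePathInPath, SinglePathNotInPath };

    // Follow the unique-successor chain from Src; report whether it
    // reaches Dst, dead-ends without Dst, or forks/side-enters.
    PathToKind singlePathTo(Block *Src, Block *Dst, bool AllowSideEntry) {
      if (Src == Dst)
        return SinglePathInPath;
      while (Src && Src->Succs.size() == 1) {
        Src = Src->Succs.front();
        if (Src == Dst)
          return SinglePathInPath;
        if (!AllowSideEntry && Src->Preds.size() > 1)
          return NotSinglePath;
      }
      if (Src && Src->Succs.empty())
        return SinglePathNotInPath;
      return NotSinglePath;
    }
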
-
-template<class PassT>
-int CFGStructurizer<PassT>::cloneOnSideEntryTo(BlockT *preBlk, BlockT *srcBlk,
- BlockT *dstBlk) {
- int cloned = 0;
- assert(preBlk->isSuccessor(srcBlk));
- while (srcBlk && srcBlk != dstBlk) {
- assert(srcBlk->succ_size() == 1);
- if (srcBlk->pred_size() > 1) {
- srcBlk = cloneBlockForPredecessor(srcBlk, preBlk);
- ++cloned;
+int AMDGPUCFGStructurizer::cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
+ MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB) {
+ int Cloned = 0;
+ assert(PreMBB->isSuccessor(SrcMBB));
+ while (SrcMBB && SrcMBB != DstMBB) {
+ assert(SrcMBB->succ_size() == 1);
+ if (SrcMBB->pred_size() > 1) {
+ SrcMBB = cloneBlockForPredecessor(SrcMBB, PreMBB);
+ ++Cloned;
}
- preBlk = srcBlk;
- srcBlk = *srcBlk->succ_begin();
+ PreMBB = SrcMBB;
+ SrcMBB = *SrcMBB->succ_begin();
}
- return cloned;
-} //cloneOnSideEntryTo
+ return Cloned;
+}
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::cloneBlockForPredecessor(BlockT *curBlk,
- BlockT *predBlk) {
- assert(predBlk->isSuccessor(curBlk) &&
+MachineBasicBlock *
+AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB,
+ MachineBasicBlock *PredMBB) {
+ assert(PredMBB->isSuccessor(MBB) &&
"succBlk is not a prececessor of curBlk");
- BlockT *cloneBlk = CFGTraits::clone(curBlk); //clone instructions
- CFGTraits::replaceInstrUseOfBlockWith(predBlk, curBlk, cloneBlk);
+ MachineBasicBlock *CloneMBB = clone(MBB); //clone instructions
+ replaceInstrUseOfBlockWith(PredMBB, MBB, CloneMBB);
//srcBlk, oldBlk, newBlk
- predBlk->removeSuccessor(curBlk);
- predBlk->addSuccessor(cloneBlk);
+ PredMBB->removeSuccessor(MBB);
+ PredMBB->addSuccessor(CloneMBB);
// add all successor to cloneBlk
- CFGTraits::cloneSuccessorList(cloneBlk, curBlk);
+ cloneSuccessorList(CloneMBB, MBB);
- numClonedInstr += curBlk->size();
+ numClonedInstr += MBB->size();
- if (DEBUGME) {
- errs() << "Cloned block: " << "BB"
- << curBlk->getNumber() << "size " << curBlk->size() << "\n";
- }
+ DEBUG(
+ dbgs() << "Cloned block: " << "BB"
+ << MBB->getNumber() << "size " << MBB->size() << "\n";
+ );
- SHOWNEWBLK(cloneBlk, "result of Cloned block: ");
+ SHOWNEWBLK(CloneMBB, "result of Cloned block: ");
- return cloneBlk;
-} //cloneBlockForPredecessor
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::exitingBlock2ExitBlock(LoopT *loopRep,
- BlockT *exitingBlk) {
- BlockT *exitBlk = NULL;
-
- for (typename BlockT::succ_iterator iterSucc = exitingBlk->succ_begin(),
- iterSuccEnd = exitingBlk->succ_end();
- iterSucc != iterSuccEnd; ++iterSucc) {
- BlockT *curBlk = *iterSucc;
- if (!loopRep->contains(curBlk)) {
- assert(exitBlk == NULL);
- exitBlk = curBlk;
- }
- }
-
- assert(exitBlk != NULL);
-
- return exitBlk;
-} //exitingBlock2ExitBlock
+ return CloneMBB;
+}
-template<class PassT>
-void CFGStructurizer<PassT>::migrateInstruction(BlockT *srcBlk,
- BlockT *dstBlk,
- InstrIterator insertPos) {
- InstrIterator spliceEnd;
+void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB,
+ MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I) {
+ MachineBasicBlock::iterator SpliceEnd;
//look for the input branchinstr, not the AMDGPU branchinstr
- InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk);
- if (branchInstr == NULL) {
- if (DEBUGME) {
- errs() << "migrateInstruction don't see branch instr\n" ;
- }
- spliceEnd = srcBlk->end();
+ MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB);
+ if (!BranchMI) {
+ DEBUG(
+ dbgs() << "migrateInstruction don't see branch instr\n" ;
+ );
+ SpliceEnd = SrcMBB->end();
} else {
- if (DEBUGME) {
- errs() << "migrateInstruction see branch instr\n" ;
- branchInstr->dump();
- }
- spliceEnd = CFGTraits::getInstrPos(srcBlk, branchInstr);
- }
- if (DEBUGME) {
- errs() << "migrateInstruction before splice dstSize = " << dstBlk->size()
- << "srcSize = " << srcBlk->size() << "\n";
+ DEBUG(
+ dbgs() << "migrateInstruction see branch instr\n" ;
+ BranchMI->dump();
+ );
+ SpliceEnd = BranchMI;
}
+ DEBUG(
+ dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size()
+ << "srcSize = " << SrcMBB->size() << "\n";
+ );
//splice insert before insertPos
- dstBlk->splice(insertPos, srcBlk, srcBlk->begin(), spliceEnd);
+ DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd);
- if (DEBUGME) {
- errs() << "migrateInstruction after splice dstSize = " << dstBlk->size()
- << "srcSize = " << srcBlk->size() << "\n";
- }
-} //migrateInstruction
+ DEBUG(
+ dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size()
+ << "srcSize = " << SrcMBB->size() << "\n";
+ );
+}
-// normalizeInfiniteLoopExit change
-// B1:
-// uncond_br LoopHeader
-//
-// to
-// B1:
-// cond_br 1 LoopHeader dummyExit
-// and return the newly added dummy exit block
-//
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::normalizeInfiniteLoopExit(LoopT* LoopRep) {
- BlockT *loopHeader;
- BlockT *loopLatch;
- loopHeader = LoopRep->getHeader();
- loopLatch = LoopRep->getLoopLatch();
- BlockT *dummyExitBlk = NULL;
+MachineBasicBlock *
+AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
+ MachineBasicBlock *LoopHeader = LoopRep->getHeader();
+ MachineBasicBlock *LoopLatch = LoopRep->getLoopLatch();
const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
- if (loopHeader!=NULL && loopLatch!=NULL) {
- InstrT *branchInstr = CFGTraits::getLoopendBlockBranchInstr(loopLatch);
- if (branchInstr!=NULL && CFGTraits::isUncondBranch(branchInstr)) {
- dummyExitBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(dummyExitBlk); //insert to function
- SHOWNEWBLK(dummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
-
- if (DEBUGME) errs() << "Old branch instr: " << *branchInstr << "\n";
-
- typename BlockT::iterator insertPos =
- CFGTraits::getInstrPos(loopLatch, branchInstr);
- unsigned immReg =
- funcRep->getRegInfo().createVirtualRegister(I32RC);
- CFGTraits::insertAssignInstrBefore(insertPos, passRep, immReg, 1);
- InstrT *newInstr =
- CFGTraits::insertInstrBefore(insertPos, AMDGPU::BRANCH_COND_i32, passRep);
- MachineInstrBuilder MIB(*funcRep, newInstr);
- MIB.addMBB(loopHeader);
- MIB.addReg(immReg, false);
-
- SHOWNEWINSTR(newInstr);
-
- branchInstr->eraseFromParent();
- loopLatch->addSuccessor(dummyExitBlk);
- }
- }
- return dummyExitBlk;
-} //normalizeInfiniteLoopExit
+ if (!LoopHeader || !LoopLatch)
+ return NULL;
+ MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch);
+ // Is LoopRep an infinite loop?
+ if (!BranchMI || !isUncondBranch(BranchMI))
+ return NULL;
-template<class PassT>
-void CFGStructurizer<PassT>::removeUnconditionalBranch(BlockT *srcBlk) {
- InstrT *branchInstr;
+ MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
+ FuncRep->push_back(DummyExitBlk); //insert to function
+ SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
+ DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
+ MachineBasicBlock::iterator I = BranchMI;
+ unsigned ImmReg = FuncRep->getRegInfo().createVirtualRegister(I32RC);
+ llvm_unreachable("Extra register needed to handle CFG");
+ MachineInstr *NewMI = insertInstrBefore(I, AMDGPU::BRANCH_COND_i32);
+ MachineInstrBuilder MIB(*FuncRep, NewMI);
+ MIB.addMBB(LoopHeader);
+ MIB.addReg(ImmReg, false);
+ SHOWNEWINSTR(NewMI);
+ BranchMI->eraseFromParent();
+ LoopLatch->addSuccessor(DummyExitBlk);
+
+ return DummyExitBlk;
+}
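
As the removed header comment spells out, normalizeInfiniteLoopExit turns the
latch's "uncond_br LoopHeader" into "cond_br 1 LoopHeader dummyExit" so the
loop gains a structural exit edge. A schematic version on a toy Block type,
ignoring the register setup (hypothetical model):

    #include <vector>

    struct Block {
      std::vector<Block *> Succs;
      bool EndsWithUncondBranch = false;
    };

    // Give an infinite loop a dummy exit edge so later structurizing
    // always sees a loop with an exit block.
    Block *normalizeInfiniteLoop(Block &Latch) {
      if (!Latch.EndsWithUncondBranch)
        return nullptr; // not an infinite loop: nothing to do
      Block *DummyExit = new Block();
      Latch.EndsWithUncondBranch = false; // now a constant-true cond branch
      Latch.Succs.push_back(DummyExit);
      return DummyExit;
    }
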
+
+void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) {
+ MachineInstr *BranchMI;
// I saw two unconditional branches in one basic block in the example
// test_fc_do_while_or.c; we need to fix this upstream to remove the loop.
- while ((branchInstr = CFGTraits::getLoopendBlockBranchInstr(srcBlk))
- && CFGTraits::isUncondBranch(branchInstr)) {
- if (DEBUGME) {
- errs() << "Removing unconditional branch instruction" ;
- branchInstr->dump();
- }
- branchInstr->eraseFromParent();
- }
-} //removeUnconditionalBranch
-
-template<class PassT>
-void CFGStructurizer<PassT>::removeRedundantConditionalBranch(BlockT *srcBlk) {
- if (srcBlk->succ_size() == 2) {
- BlockT *blk1 = *srcBlk->succ_begin();
- BlockT *blk2 = *(++srcBlk->succ_begin());
-
- if (blk1 == blk2) {
- InstrT *branchInstr = CFGTraits::getNormalBlockBranchInstr(srcBlk);
- assert(branchInstr && CFGTraits::isCondBranch(branchInstr));
- if (DEBUGME) {
- errs() << "Removing unneeded conditional branch instruction" ;
- branchInstr->dump();
- }
- branchInstr->eraseFromParent();
- SHOWNEWBLK(blk1, "Removing redundant successor");
- srcBlk->removeSuccessor(blk1);
- }
- }
-} //removeRedundantConditionalBranch
-
-template<class PassT>
-void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*,
- DEFAULT_VEC_SLOTS> &retBlks) {
- BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(dummyExitBlk); //insert to function
- CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep);
-
- for (typename SmallVector<BlockT *, DEFAULT_VEC_SLOTS>::iterator iter =
- retBlks.begin(),
- iterEnd = retBlks.end(); iter != iterEnd; ++iter) {
- BlockT *curBlk = *iter;
- InstrT *curInstr = CFGTraits::getReturnInstr(curBlk);
- if (curInstr) {
- curInstr->eraseFromParent();
- }
- curBlk->addSuccessor(dummyExitBlk);
- if (DEBUGME) {
- errs() << "Add dummyExitBlock to BB" << curBlk->getNumber()
- << " successors\n";
- }
- } //for
-
- SHOWNEWBLK(dummyExitBlk, "DummyExitBlock: ");
-} //addDummyExitBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::removeSuccessor(BlockT *srcBlk) {
- while (srcBlk->succ_size()) {
- srcBlk->removeSuccessor(*srcBlk->succ_begin());
+ while ((BranchMI = getLoopendBlockBranchInstr(MBB))
+ && isUncondBranch(BranchMI)) {
+ DEBUG(dbgs() << "Removing uncond branch instr"; BranchMI->dump(););
+ BranchMI->eraseFromParent();
}
}
-template<class PassT>
-void CFGStructurizer<PassT>::recordSccnum(BlockT *srcBlk, int sccNum) {
- BlockInfo *&srcBlkInfo = blockInfoMap[srcBlk];
+void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
+ MachineBasicBlock *MBB) {
+ if (MBB->succ_size() != 2)
+ return;
+ MachineBasicBlock *MBB1 = *MBB->succ_begin();
+ MachineBasicBlock *MBB2 = *llvm::next(MBB->succ_begin());
+ if (MBB1 != MBB2)
+ return;
+
+ MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
+ assert(BranchMI && isCondBranch(BranchMI));
+ DEBUG(dbgs() << "Removing unneeded cond branch instr"; BranchMI->dump(););
+ BranchMI->eraseFromParent();
+ SHOWNEWBLK(MBB1, "Removing redundant successor");
+ MBB->removeSuccessor(MBB1);
+}
- if (srcBlkInfo == NULL) {
- srcBlkInfo = new BlockInfo();
+void AMDGPUCFGStructurizer::addDummyExitBlock(
+ SmallVectorImpl<MachineBasicBlock*> &RetMBB) {
+ MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
+ FuncRep->push_back(DummyExitBlk); //insert to function
+ insertInstrEnd(DummyExitBlk, AMDGPU::RETURN);
+
+ for (SmallVectorImpl<MachineBasicBlock *>::iterator It = RetMBB.begin(),
+ E = RetMBB.end(); It != E; ++It) {
+ MachineBasicBlock *MBB = *It;
+ MachineInstr *MI = getReturnInstr(MBB);
+ if (MI)
+ MI->eraseFromParent();
+ MBB->addSuccessor(DummyExitBlk);
+ DEBUG(
+ dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
+ << " successors\n";
+ );
}
+ SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: ");
+}
- srcBlkInfo->sccNum = sccNum;
+void AMDGPUCFGStructurizer::removeSuccessor(MachineBasicBlock *MBB) {
+ while (MBB->succ_size())
+ MBB->removeSuccessor(*MBB->succ_begin());
}
-template<class PassT>
-int CFGStructurizer<PassT>::getSCCNum(BlockT *srcBlk) {
- BlockInfo *srcBlkInfo = blockInfoMap[srcBlk];
- return srcBlkInfo ? srcBlkInfo->sccNum : INVALIDSCCNUM;
+void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB,
+ int SccNum) {
+ BlockInformation *&srcBlkInfo = BlockInfoMap[MBB];
+ if (!srcBlkInfo)
+ srcBlkInfo = new BlockInformation();
+ srcBlkInfo->SccNum = SccNum;
}
-template<class PassT>
-void CFGStructurizer<PassT>::retireBlock(BlockT *dstBlk, BlockT *srcBlk) {
- if (DEBUGME) {
- errs() << "Retiring BB" << srcBlk->getNumber() << "\n";
- }
+void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
+ DEBUG(
+ dbgs() << "Retiring BB" << MBB->getNumber() << "\n";
+ );
- BlockInfo *&srcBlkInfo = blockInfoMap[srcBlk];
+ BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB];
- if (srcBlkInfo == NULL) {
- srcBlkInfo = new BlockInfo();
- }
+ if (!SrcBlkInfo)
+ SrcBlkInfo = new BlockInformation();
- srcBlkInfo->isRetired = true;
- assert(srcBlk->succ_size() == 0 && srcBlk->pred_size() == 0
+ SrcBlkInfo->IsRetired = true;
+ assert(MBB->succ_size() == 0 && MBB->pred_size() == 0
&& "can't retire block yet");
}
-template<class PassT>
-bool CFGStructurizer<PassT>::isRetiredBlock(BlockT *srcBlk) {
- BlockInfo *srcBlkInfo = blockInfoMap[srcBlk];
- return (srcBlkInfo && srcBlkInfo->isRetired);
-}
-
-template<class PassT>
-bool CFGStructurizer<PassT>::isActiveLoophead(BlockT *curBlk) {
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- while (loopRep && loopRep->getHeader() == curBlk) {
- LoopLandInfo *loopLand = getLoopLandInfo(loopRep);
-
- if(loopLand == NULL)
- return true;
-
- BlockT *landBlk = loopLand->landBlk;
- assert(landBlk);
- if (!isRetiredBlock(landBlk)) {
- return true;
- }
-
- loopRep = loopRep->getParentLoop();
- }
-
- return false;
-} //isActiveLoophead
-
-template<class PassT>
-bool CFGStructurizer<PassT>::needMigrateBlock(BlockT *blk) {
- const unsigned blockSizeThreshold = 30;
- const unsigned cloneInstrThreshold = 100;
-
- bool multiplePreds = blk && (blk->pred_size() > 1);
-
- if(!multiplePreds)
- return false;
-
- unsigned blkSize = blk->size();
- return ((blkSize > blockSizeThreshold)
- && (blkSize * (blk->pred_size() - 1) > cloneInstrThreshold));
-} //needMigrateBlock
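
The removed heuristic migrates a block only when duplicating it into each
extra predecessor would be expensive: the block must exceed 30 instructions
and the total cloned count must exceed 100. For example, a 40-instruction
block with 4 predecessors would clone 40 * (4 - 1) = 120 instructions and is
migrated, while the same block with 2 predecessors clones only 40 and is not.
A standalone restatement of the same thresholds:

    // Same thresholds as needMigrateBlock above.
    bool needMigrate(unsigned BlkSize, unsigned NumPreds) {
      const unsigned BlockSizeThreshold = 30;
      const unsigned CloneInstrThreshold = 100;
      if (NumPreds <= 1)
        return false;
      return BlkSize > BlockSizeThreshold &&
             BlkSize * (NumPreds - 1) > CloneInstrThreshold;
    }
    // needMigrate(40, 4) == true   (120 instructions would be cloned)
    // needMigrate(40, 2) == false  (only 40 would be cloned)
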
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::recordLoopLandBlock(LoopT *loopRep, BlockT *landBlk,
- BlockTSmallerVector &exitBlks,
- std::set<BlockT *> &exitBlkSet) {
- SmallVector<BlockT *, DEFAULT_VEC_SLOTS> inpathBlks; //in exit path blocks
-
- for (typename BlockT::pred_iterator predIter = landBlk->pred_begin(),
- predIterEnd = landBlk->pred_end();
- predIter != predIterEnd; ++predIter) {
- BlockT *curBlk = *predIter;
- if (loopRep->contains(curBlk) || exitBlkSet.count(curBlk)) {
- inpathBlks.push_back(curBlk);
- }
- } //for
-
- //if landBlk has predecessors that are not in the given loop,
- //create a new block
- BlockT *newLandBlk = landBlk;
- if (inpathBlks.size() != landBlk->pred_size()) {
- newLandBlk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(newLandBlk); //insert to function
- newLandBlk->addSuccessor(landBlk);
- for (typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::iterator iter =
- inpathBlks.begin(),
- iterEnd = inpathBlks.end(); iter != iterEnd; ++iter) {
- BlockT *curBlk = *iter;
- CFGTraits::replaceInstrUseOfBlockWith(curBlk, landBlk, newLandBlk);
- //srcBlk, oldBlk, newBlk
- curBlk->removeSuccessor(landBlk);
- curBlk->addSuccessor(newLandBlk);
- }
- for (size_t i = 0, tot = exitBlks.size(); i < tot; ++i) {
- if (exitBlks[i] == landBlk) {
- exitBlks[i] = newLandBlk;
- }
- }
- SHOWNEWBLK(newLandBlk, "NewLandingBlock: ");
- }
-
- setLoopLandBlock(loopRep, newLandBlk);
-
- return newLandBlk;
-} // recordLoopbreakLand
-
-template<class PassT>
-void CFGStructurizer<PassT>::setLoopLandBlock(LoopT *loopRep, BlockT *blk) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- assert(theEntry->landBlk == NULL);
-
- if (blk == NULL) {
- blk = funcRep->CreateMachineBasicBlock();
- funcRep->push_back(blk); //insert to function
- SHOWNEWBLK(blk, "DummyLandingBlock for loop without break: ");
- }
-
- theEntry->landBlk = blk;
-
- if (DEBUGME) {
- errs() << "setLoopLandBlock loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " landing-block = BB" << blk->getNumber() << "\n";
- }
-} // setLoopLandBlock
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopBreakOnReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
-
- theEntry->breakOnRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopBreakOnReg loop-header = BB"
+void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep,
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock *&TheEntry = LLInfoMap[loopRep];
+ if (!MBB) {
+ MBB = FuncRep->CreateMachineBasicBlock();
+ FuncRep->push_back(MBB); //insert to function
+ SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: ");
+ }
+ TheEntry = MBB;
+ DEBUG(
+ dbgs() << "setLoopLandBlock loop-header = BB"
<< loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopBreakOnReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopContOnReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->contOnRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopContOnReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopContOnReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopBreakInitReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->breakInitRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopBreakInitReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopBreakInitReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopContInitReg(LoopT *loopRep, RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->contInitRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopContInitReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopContInitReg
-
-template<class PassT>
-void CFGStructurizer<PassT>::addLoopEndbranchInitReg(LoopT *loopRep,
- RegiT regNum) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- if (theEntry == NULL) {
- theEntry = new LoopLandInfo();
- }
- theEntry->endbranchInitRegs.insert(regNum);
-
- if (DEBUGME) {
- errs() << "addLoopEndbranchInitReg loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " regNum = " << regNum << "\n";
- }
-} // addLoopEndbranchInitReg
-
-template<class PassT>
-typename CFGStructurizer<PassT>::LoopLandInfo *
-CFGStructurizer<PassT>::getLoopLandInfo(LoopT *loopRep) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- return theEntry;
-} // getLoopLandInfo
-
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::getLoopLandBlock(LoopT *loopRep) {
- LoopLandInfo *&theEntry = loopLandInfoMap[loopRep];
-
- return theEntry ? theEntry->landBlk : NULL;
-} // getLoopLandBlock
-
-
-template<class PassT>
-bool CFGStructurizer<PassT>::hasBackEdge(BlockT *curBlk) {
- LoopT *loopRep = loopInfo->getLoopFor(curBlk);
- if (loopRep == NULL)
- return false;
-
- BlockT *loopHeader = loopRep->getHeader();
-
- return curBlk->isSuccessor(loopHeader);
-
-} //hasBackEdge
-
-template<class PassT>
-unsigned CFGStructurizer<PassT>::getLoopDepth(LoopT *loopRep) {
- return loopRep ? loopRep->getLoopDepth() : 0;
-} //getLoopDepth
-
-template<class PassT>
-int CFGStructurizer<PassT>::countActiveBlock
-(typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterStart,
- typename SmallVector<BlockT*, DEFAULT_VEC_SLOTS>::const_iterator iterEnd) {
- int count = 0;
- while (iterStart != iterEnd) {
- if (!isRetiredBlock(*iterStart)) {
- ++count;
- }
- ++iterStart;
- }
-
- return count;
-} //countActiveBlock
+ << " landing-block = BB" << MBB->getNumber() << "\n";
+ );
+}
-// This is a workaround for findNearestCommonDominator not being available for
-// post-dominators; a proper fix should go into Dominators.h.
+MachineBasicBlock *
+AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
+ MachineBasicBlock *MBB2) {
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT*
-CFGStructurizer<PassT>::findNearestCommonPostDom(BlockT *blk1, BlockT *blk2) {
+ if (PDT->dominates(MBB1, MBB2))
+ return MBB1;
+ if (PDT->dominates(MBB2, MBB1))
+ return MBB2;
- if (postDomTree->dominates(blk1, blk2)) {
- return blk1;
- }
- if (postDomTree->dominates(blk2, blk1)) {
- return blk2;
- }
-
- DomTreeNodeT *node1 = postDomTree->getNode(blk1);
- DomTreeNodeT *node2 = postDomTree->getNode(blk2);
+ MachineDomTreeNode *Node1 = PDT->getNode(MBB1);
+ MachineDomTreeNode *Node2 = PDT->getNode(MBB2);
// Handle newly cloned node.
- if (node1 == NULL && blk1->succ_size() == 1) {
- return findNearestCommonPostDom(*blk1->succ_begin(), blk2);
- }
- if (node2 == NULL && blk2->succ_size() == 1) {
- return findNearestCommonPostDom(blk1, *blk2->succ_begin());
- }
+ if (!Node1 && MBB1->succ_size() == 1)
+ return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2);
+ if (!Node2 && MBB2->succ_size() == 1)
+ return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
- if (node1 == NULL || node2 == NULL) {
+ if (!Node1 || !Node2)
return NULL;
- }
- node1 = node1->getIDom();
- while (node1) {
- if (postDomTree->dominates(node1, node2)) {
- return node1->getBlock();
- }
- node1 = node1->getIDom();
+ Node1 = Node1->getIDom();
+ while (Node1) {
+ if (PDT->dominates(Node1, Node2))
+ return Node1->getBlock();
+ Node1 = Node1->getIDom();
}
return NULL;
}
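
[Editor's note] The rewritten findNearestCommonPostDom above is the classic nearest-common-ancestor walk over a (post-)dominator tree: check mutual dominance first, then climb one node's immediate-dominator chain until a node that also post-dominates the other is found. A minimal self-contained sketch of the same idea, using a toy node type rather than the LLVM MachineDomTreeNode API:

struct ToyDomNode {
  ToyDomNode *IDom; // immediate (post-)dominator; null at the tree root
};

// A dominates B iff A lies on B's immediate-dominator chain.
static bool dominates(const ToyDomNode *A, const ToyDomNode *B) {
  for (const ToyDomNode *N = B; N; N = N->IDom)
    if (N == A)
      return true;
  return false;
}

// Nearest common dominator: the first ancestor of N1 that also
// dominates N2. O(depth^2), which mirrors the loop in the pass.
static ToyDomNode *nearestCommon(ToyDomNode *N1, ToyDomNode *N2) {
  for (ToyDomNode *N = N1; N; N = N->IDom)
    if (dominates(N, N2))
      return N;
  return nullptr;
}
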
-template<class PassT>
-typename CFGStructurizer<PassT>::BlockT *
-CFGStructurizer<PassT>::findNearestCommonPostDom
-(typename std::set<BlockT *> &blks) {
- BlockT *commonDom;
- typename std::set<BlockT *>::const_iterator iter = blks.begin();
- typename std::set<BlockT *>::const_iterator iterEnd = blks.end();
- for (commonDom = *iter; iter != iterEnd && commonDom != NULL; ++iter) {
- BlockT *curBlk = *iter;
- if (curBlk != commonDom) {
- commonDom = findNearestCommonPostDom(curBlk, commonDom);
- }
- }
-
- if (DEBUGME) {
- errs() << "Common post dominator for exit blocks is ";
- if (commonDom) {
- errs() << "BB" << commonDom->getNumber() << "\n";
- } else {
- errs() << "NULL\n";
- }
- }
-
- return commonDom;
-} //findNearestCommonPostDom
-
-} //end namespace llvm
-
-//todo: move-end
-
-
-//===----------------------------------------------------------------------===//
-//
-// CFGStructurizer for AMDGPU
-//
-//===----------------------------------------------------------------------===//
-
-
-using namespace llvmCFGStruct;
-
-namespace llvm {
-class AMDGPUCFGStructurizer : public MachineFunctionPass {
-public:
- typedef MachineInstr InstructionType;
- typedef MachineFunction FunctionType;
- typedef MachineBasicBlock BlockType;
- typedef MachineLoopInfo LoopinfoType;
- typedef MachineDominatorTree DominatortreeType;
- typedef MachinePostDominatorTree PostDominatortreeType;
- typedef MachineDomTreeNode DomTreeNodeType;
- typedef MachineLoop LoopType;
-
-protected:
- TargetMachine &TM;
- const TargetInstrInfo *TII;
- const AMDGPURegisterInfo *TRI;
-
-public:
- AMDGPUCFGStructurizer(char &pid, TargetMachine &tm);
- const TargetInstrInfo *getTargetInstrInfo() const;
-
-private:
-
-};
-
-} //end of namespace llvm
-AMDGPUCFGStructurizer::AMDGPUCFGStructurizer(char &pid, TargetMachine &tm)
-: MachineFunctionPass(pid), TM(tm), TII(tm.getInstrInfo()),
- TRI(static_cast<const AMDGPURegisterInfo *>(tm.getRegisterInfo())) {
-}
-
-const TargetInstrInfo *AMDGPUCFGStructurizer::getTargetInstrInfo() const {
- return TII;
+MachineBasicBlock *
+AMDGPUCFGStructurizer::findNearestCommonPostDom(
+ std::set<MachineBasicBlock *> &MBBs) {
+ MachineBasicBlock *CommonDom;
+ std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin();
+ std::set<MachineBasicBlock *>::const_iterator E = MBBs.end();
+ for (CommonDom = *It; It != E && CommonDom; ++It) {
+ MachineBasicBlock *MBB = *It;
+ if (MBB != CommonDom)
+ CommonDom = findNearestCommonPostDom(MBB, CommonDom);
+ }
+
+ DEBUG(
+ dbgs() << "Common post dominator for exit blocks is ";
+ if (CommonDom)
+ dbgs() << "BB" << CommonDom->getNumber() << "\n";
+ else
+ dbgs() << "NULL\n";
+ );
+
+ return CommonDom;
}
-//===----------------------------------------------------------------------===//
-//
-// CFGPrepare
-//
-//===----------------------------------------------------------------------===//
-
-
-using namespace llvmCFGStruct;
-
-namespace llvm {
-class AMDGPUCFGPrepare : public AMDGPUCFGStructurizer {
-public:
- static char ID;
-public:
- AMDGPUCFGPrepare(TargetMachine &tm);
-
- virtual const char *getPassName() const;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-
- bool runOnMachineFunction(MachineFunction &F);
+char AMDGPUCFGStructurizer::ID = 0;
-private:
-
-};
+} // end anonymous namespace
-char AMDGPUCFGPrepare::ID = 0;
-} //end of namespace llvm
-
-AMDGPUCFGPrepare::AMDGPUCFGPrepare(TargetMachine &tm)
- : AMDGPUCFGStructurizer(ID, tm ) {
-}
-const char *AMDGPUCFGPrepare::getPassName() const {
- return "AMD IL Control Flow Graph Preparation Pass";
-}
-
-void AMDGPUCFGPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<MachineFunctionAnalysis>();
- AU.addRequired<MachineFunctionAnalysis>();
- AU.addRequired<MachineDominatorTree>();
- AU.addRequired<MachinePostDominatorTree>();
- AU.addRequired<MachineLoopInfo>();
-}
-
-//===----------------------------------------------------------------------===//
-//
-// CFGPerform
-//
-//===----------------------------------------------------------------------===//
-
-
-using namespace llvmCFGStruct;
-
-namespace llvm {
-class AMDGPUCFGPerform : public AMDGPUCFGStructurizer {
-public:
- static char ID;
-
-public:
- AMDGPUCFGPerform(TargetMachine &tm);
- virtual const char *getPassName() const;
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- bool runOnMachineFunction(MachineFunction &F);
-
-private:
-
-};
-
-char AMDGPUCFGPerform::ID = 0;
-} //end of namespace llvm
-
- AMDGPUCFGPerform::AMDGPUCFGPerform(TargetMachine &tm)
-: AMDGPUCFGStructurizer(ID, tm) {
-}
-
-const char *AMDGPUCFGPerform::getPassName() const {
- return "AMD IL Control Flow Graph structurizer Pass";
-}
-
-void AMDGPUCFGPerform::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreserved<MachineFunctionAnalysis>();
- AU.addRequired<MachineFunctionAnalysis>();
- AU.addRequired<MachineDominatorTree>();
- AU.addRequired<MachinePostDominatorTree>();
- AU.addRequired<MachineLoopInfo>();
-}
-
-//===----------------------------------------------------------------------===//
-//
-// CFGStructTraits<AMDGPUCFGStructurizer>
-//
-//===----------------------------------------------------------------------===//
-
-namespace llvmCFGStruct {
-// This class is tailored to the AMDGPU backend.
-template<>
-struct CFGStructTraits<AMDGPUCFGStructurizer> {
- typedef int RegiT;
-
- static int getBranchNzeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
- case AMDGPU::BRANCH_COND_i32:
- case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
- default:
- assert(0 && "internal error");
- }
- return -1;
- }
-
- static int getBranchZeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
- case AMDGPU::BRANCH_COND_i32:
- case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
- default:
- assert(0 && "internal error");
- }
- return -1;
- }
-
- static int getContinueNzeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
- default:
- assert(0 && "internal error");
- };
- return -1;
- }
-
- static int getContinueZeroOpcode(int oldOpcode) {
- switch(oldOpcode) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
- default:
- assert(0 && "internal error");
- }
- return -1;
- }
-
- static MachineBasicBlock *getTrueBranch(MachineInstr *instr) {
- return instr->getOperand(0).getMBB();
- }
-
- static void setTrueBranch(MachineInstr *instr, MachineBasicBlock *blk) {
- instr->getOperand(0).setMBB(blk);
- }
-
- static MachineBasicBlock *
- getFalseBranch(MachineBasicBlock *blk, MachineInstr *instr) {
- assert(blk->succ_size() == 2);
- MachineBasicBlock *trueBranch = getTrueBranch(instr);
- MachineBasicBlock::succ_iterator iter = blk->succ_begin();
- MachineBasicBlock::succ_iterator iterNext = iter;
- ++iterNext;
-
- return (*iter == trueBranch) ? *iterNext : *iter;
- }
-
- static bool isCondBranch(MachineInstr *instr) {
- switch (instr->getOpcode()) {
- case AMDGPU::JUMP_COND:
- case AMDGPU::BRANCH_COND_i32:
- case AMDGPU::BRANCH_COND_f32:
- break;
- default:
- return false;
- }
- return true;
- }
-
- static bool isUncondBranch(MachineInstr *instr) {
- switch (instr->getOpcode()) {
- case AMDGPU::JUMP:
- case AMDGPU::BRANCH:
- return true;
- default:
- return false;
- }
- return true;
- }
-
- static DebugLoc getLastDebugLocInBB(MachineBasicBlock *blk) {
- //Get the DebugLoc from the last MachineBasicBlock instruction with debug info
- DebugLoc DL;
- for (MachineBasicBlock::iterator iter = blk->begin(); iter != blk->end(); ++iter) {
- MachineInstr *instr = &(*iter);
- if (instr->getDebugLoc().isUnknown() == false) {
- DL = instr->getDebugLoc();
- }
- }
- return DL;
- }
-
- static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *blk) {
- MachineBasicBlock::reverse_iterator iter = blk->rbegin();
- MachineInstr *instr = &*iter;
- if (instr && (isCondBranch(instr) || isUncondBranch(instr))) {
- return instr;
- }
- return NULL;
- }
-
- // The correct naming for this is getPossibleLoopendBlockBranchInstr.
- //
- // A BB with a backward edge could have move instructions after the branch
- // instruction; such move instructions "belong to" the loop backward edge.
- //
- static MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *blk) {
- const AMDGPUInstrInfo * TII = static_cast<const AMDGPUInstrInfo *>(
- blk->getParent()->getTarget().getInstrInfo());
-
- for (MachineBasicBlock::reverse_iterator iter = blk->rbegin(),
- iterEnd = blk->rend(); iter != iterEnd; ++iter) {
- // FIXME: Simplify
- MachineInstr *instr = &*iter;
- if (instr) {
- if (isCondBranch(instr) || isUncondBranch(instr)) {
- return instr;
- } else if (!TII->isMov(instr->getOpcode())) {
- break;
- }
- }
- }
- return NULL;
- }
-
- static MachineInstr *getReturnInstr(MachineBasicBlock *blk) {
- MachineBasicBlock::reverse_iterator iter = blk->rbegin();
- if (iter != blk->rend()) {
- MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDGPU::RETURN) {
- return instr;
- }
- }
- return NULL;
- }
-
- static MachineInstr *getContinueInstr(MachineBasicBlock *blk) {
- MachineBasicBlock::reverse_iterator iter = blk->rbegin();
- if (iter != blk->rend()) {
- MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDGPU::CONTINUE) {
- return instr;
- }
- }
- return NULL;
- }
-
- static MachineInstr *getLoopBreakInstr(MachineBasicBlock *blk) {
- for (MachineBasicBlock::iterator iter = blk->begin(); (iter != blk->end()); ++iter) {
- MachineInstr *instr = &(*iter);
- if (instr->getOpcode() == AMDGPU::PREDICATED_BREAK) {
- return instr;
- }
- }
- return NULL;
- }
-
- static bool isReturnBlock(MachineBasicBlock *blk) {
- MachineInstr *instr = getReturnInstr(blk);
- bool isReturn = (blk->succ_size() == 0);
- if (instr) {
- assert(isReturn);
- } else if (isReturn) {
- if (DEBUGME) {
- errs() << "BB" << blk->getNumber()
- <<" is return block without RETURN instr\n";
- }
- }
-
- return isReturn;
- }
-
- static MachineBasicBlock::iterator
- getInstrPos(MachineBasicBlock *blk, MachineInstr *instr) {
- assert(instr->getParent() == blk && "instruction doesn't belong to block");
- MachineBasicBlock::iterator iter = blk->begin();
- MachineBasicBlock::iterator iterEnd = blk->end();
- while (&(*iter) != instr && iter != iterEnd) {
- ++iter;
- }
-
- assert(iter != iterEnd);
- return iter;
- }//getInstrPos
-
- static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep) {
- return insertInstrBefore(blk,newOpcode,passRep,DebugLoc());
- } //insertInstrBefore
-
- static MachineInstr *insertInstrBefore(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep, DebugLoc DL) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineInstr *newInstr =
- blk->getParent()->CreateMachineInstr(tii->get(newOpcode), DL);
-
- MachineBasicBlock::iterator res;
- if (blk->begin() != blk->end()) {
- blk->insert(blk->begin(), newInstr);
- } else {
- blk->push_back(newInstr);
- }
-
- SHOWNEWINSTR(newInstr);
-
- return newInstr;
- } //insertInstrBefore
-
- static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep) {
- insertInstrEnd(blk,newOpcode,passRep,DebugLoc());
- } //insertInstrEnd
-
- static void insertInstrEnd(MachineBasicBlock *blk, int newOpcode,
- AMDGPUCFGStructurizer *passRep, DebugLoc DL) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineInstr *newInstr = blk->getParent()
- ->CreateMachineInstr(tii->get(newOpcode), DL);
-
- blk->push_back(newInstr);
- //assume the instruction doesn't take any reg operand ...
-
- SHOWNEWINSTR(newInstr);
- } //insertInstrEnd
-
- static MachineInstr *insertInstrBefore(MachineBasicBlock::iterator instrPos,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep) {
- MachineInstr *oldInstr = &(*instrPos);
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineBasicBlock *blk = oldInstr->getParent();
- MachineInstr *newInstr =
- blk->getParent()->CreateMachineInstr(tii->get(newOpcode),
- DebugLoc());
-
- blk->insert(instrPos, newInstr);
- //assume the instruction doesn't take any reg operand ...
-
- SHOWNEWINSTR(newInstr);
- return newInstr;
- } //insertInstrBefore
-
- static void insertCondBranchBefore(MachineBasicBlock::iterator instrPos,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep,
- DebugLoc DL) {
- MachineInstr *oldInstr = &(*instrPos);
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineBasicBlock *blk = oldInstr->getParent();
- MachineFunction *MF = blk->getParent();
- MachineInstr *newInstr = MF->CreateMachineInstr(tii->get(newOpcode), DL);
-
- blk->insert(instrPos, newInstr);
- MachineInstrBuilder MIB(*MF, newInstr);
- MIB.addReg(oldInstr->getOperand(1).getReg(), false);
-
- SHOWNEWINSTR(newInstr);
- // oldInstr is erased later via oldInstr->eraseFromParent().
- } //insertCondBranchBefore
-
- static void insertCondBranchBefore(MachineBasicBlock *blk,
- MachineBasicBlock::iterator insertPos,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum,
- DebugLoc DL) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineFunction *MF = blk->getParent();
-
- MachineInstr *newInstr = MF->CreateMachineInstr(tii->get(newOpcode), DL);
-
- //insert before
- blk->insert(insertPos, newInstr);
- MachineInstrBuilder(*MF, newInstr).addReg(regNum, false);
-
- SHOWNEWINSTR(newInstr);
- } //insertCondBranchBefore
-
- static void insertCondBranchEnd(MachineBasicBlock *blk,
- int newOpcode,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum) {
- const TargetInstrInfo *tii = passRep->getTargetInstrInfo();
- MachineFunction *MF = blk->getParent();
- MachineInstr *newInstr =
- MF->CreateMachineInstr(tii->get(newOpcode), DebugLoc());
-
- blk->push_back(newInstr);
- MachineInstrBuilder(*MF, newInstr).addReg(regNum, false);
-
- SHOWNEWINSTR(newInstr);
- } //insertCondBranchEnd
-
-
- static void insertAssignInstrBefore(MachineBasicBlock::iterator instrPos,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum, int regVal) {
- MachineInstr *oldInstr = &(*instrPos);
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
- MachineBasicBlock *blk = oldInstr->getParent();
- MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
- regVal);
- blk->insert(instrPos, newInstr);
-
- SHOWNEWINSTR(newInstr);
- } //insertAssignInstrBefore
-
- static void insertAssignInstrBefore(MachineBasicBlock *blk,
- AMDGPUCFGStructurizer *passRep,
- RegiT regNum, int regVal) {
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
-
- MachineInstr *newInstr = tii->getMovImmInstr(blk->getParent(), regNum,
- regVal);
- if (blk->begin() != blk->end()) {
- blk->insert(blk->begin(), newInstr);
- } else {
- blk->push_back(newInstr);
- }
-
- SHOWNEWINSTR(newInstr);
-
- } //insertAssignInstrBefore
-
- static void insertCompareInstrBefore(MachineBasicBlock *blk,
- MachineBasicBlock::iterator instrPos,
- AMDGPUCFGStructurizer *passRep,
- RegiT dstReg, RegiT src1Reg,
- RegiT src2Reg) {
- const AMDGPUInstrInfo *tii =
- static_cast<const AMDGPUInstrInfo *>(passRep->getTargetInstrInfo());
- MachineFunction *MF = blk->getParent();
- MachineInstr *newInstr =
- MF->CreateMachineInstr(tii->get(tii->getIEQOpcode()), DebugLoc());
-
- MachineInstrBuilder MIB(*MF, newInstr);
- MIB.addReg(dstReg, RegState::Define); //set target
- MIB.addReg(src1Reg); //set src value
- MIB.addReg(src2Reg); //set src value
-
- blk->insert(instrPos, newInstr);
- SHOWNEWINSTR(newInstr);
-
- } //insertCompareInstrBefore
-
- static void cloneSuccessorList(MachineBasicBlock *dstBlk,
- MachineBasicBlock *srcBlk) {
- for (MachineBasicBlock::succ_iterator iter = srcBlk->succ_begin(),
- iterEnd = srcBlk->succ_end(); iter != iterEnd; ++iter) {
- dstBlk->addSuccessor(*iter); // *iter's predecessor is also taken care of
- }
- } //cloneSuccessorList
-
- static MachineBasicBlock *clone(MachineBasicBlock *srcBlk) {
- MachineFunction *func = srcBlk->getParent();
- MachineBasicBlock *newBlk = func->CreateMachineBasicBlock();
- func->push_back(newBlk); //insert to function
- for (MachineBasicBlock::iterator iter = srcBlk->begin(),
- iterEnd = srcBlk->end();
- iter != iterEnd; ++iter) {
- MachineInstr *instr = func->CloneMachineInstr(iter);
- newBlk->push_back(instr);
- }
- return newBlk;
- }
-
- //MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose because
- //the AMDGPU instruction is not recognized as a terminator; fix this and
- //retire this routine.
- static void replaceInstrUseOfBlockWith(MachineBasicBlock *srcBlk,
- MachineBasicBlock *oldBlk,
- MachineBasicBlock *newBlk) {
- MachineInstr *branchInstr = getLoopendBlockBranchInstr(srcBlk);
- if (branchInstr && isCondBranch(branchInstr) &&
- getTrueBranch(branchInstr) == oldBlk) {
- setTrueBranch(branchInstr, newBlk);
- }
- }
-
- static void wrapup(MachineBasicBlock *entryBlk) {
- assert((!entryBlk->getParent()->getJumpTableInfo()
- || entryBlk->getParent()->getJumpTableInfo()->isEmpty())
- && "found a jump table");
-
- //collect continue right before endloop
- SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> contInstr;
- MachineBasicBlock::iterator pre = entryBlk->begin();
- MachineBasicBlock::iterator iterEnd = entryBlk->end();
- MachineBasicBlock::iterator iter = pre;
- while (iter != iterEnd) {
- if (pre->getOpcode() == AMDGPU::CONTINUE
- && iter->getOpcode() == AMDGPU::ENDLOOP) {
- contInstr.push_back(pre);
- }
- pre = iter;
- ++iter;
- } //end while
-
- //delete continue right before endloop
- for (unsigned i = 0; i < contInstr.size(); ++i) {
- contInstr[i]->eraseFromParent();
- }
-
- // TODO: Fix up the jump table so a later phase won't be confused. If
- // jumpTableInfo->isEmpty() is false we need to clean the jump table, but
- // there isn't such an interface yet; alternatively, replace all the other
- // blocks in the jump table with the entryBlk.
-
- } //wrapup
-
- static MachineDominatorTree *getDominatorTree(AMDGPUCFGStructurizer &pass) {
- return &pass.getAnalysis<MachineDominatorTree>();
- }
-
- static MachinePostDominatorTree*
- getPostDominatorTree(AMDGPUCFGStructurizer &pass) {
- return &pass.getAnalysis<MachinePostDominatorTree>();
- }
-
- static MachineLoopInfo *getLoopInfo(AMDGPUCFGStructurizer &pass) {
- return &pass.getAnalysis<MachineLoopInfo>();
- }
-}; // template class CFGStructTraits
-} //end of namespace llvm
-
-// createAMDGPUCFGPreparationPass- Returns a pass
-FunctionPass *llvm::createAMDGPUCFGPreparationPass(TargetMachine &tm
- ) {
- return new AMDGPUCFGPrepare(tm );
-}
-
-bool AMDGPUCFGPrepare::runOnMachineFunction(MachineFunction &func) {
- return llvmCFGStruct::CFGStructurizer<AMDGPUCFGStructurizer>().prepare(func,
- *this,
- TRI);
-}
-
-// createAMDGPUCFGStructurizerPass- Returns a pass
-FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm
- ) {
- return new AMDGPUCFGPerform(tm );
-}
-bool AMDGPUCFGPerform::runOnMachineFunction(MachineFunction &func) {
- return llvmCFGStruct::CFGStructurizer<AMDGPUCFGStructurizer>().run(func,
- *this,
- TRI);
+FunctionPass *llvm::createAMDGPUCFGStructurizerPass(TargetMachine &tm) {
+ return new AMDGPUCFGStructurizer(tm);
}
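
[Editor's note] The "char AMDGPUCFGStructurizer::ID = 0;" definition a few hunks above, paired with the factory function here, is standard LLVM pass boilerplate: it is the address of ID, not its value, that uniquely identifies the pass type at runtime. A rough stand-alone illustration of why a static member's address works as a type tag (toy code, not the real PassRegistry):

#include <cstdio>

struct StructurizerPass { static char ID; };
struct SomeOtherPass { static char ID; };
char StructurizerPass::ID = 0;
char SomeOtherPass::ID = 0;

int main() {
  // Both IDs hold 0; only their addresses differ, so the address is a
  // collision-free key for registering and looking up pass types.
  std::printf("%d\n", &StructurizerPass::ID == &SomeOtherPass::ID); // prints 0
}
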
diff --git a/lib/Target/R600/AMDILDevice.cpp b/lib/Target/R600/AMDILDevice.cpp
deleted file mode 100644
index db8e01e..0000000
--- a/lib/Target/R600/AMDILDevice.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-//===-- AMDILDevice.cpp - Base class for AMDIL Devices --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILDevice.h"
-#include "AMDGPUSubtarget.h"
-
-using namespace llvm;
-// Default implementation for all of the classes.
-AMDGPUDevice::AMDGPUDevice(AMDGPUSubtarget *ST) : mSTM(ST) {
- mHWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
- mSWBits.resize(AMDGPUDeviceInfo::MaxNumberCapabilities);
- setCaps();
- DeviceFlag = OCL_DEVICE_ALL;
-}
-
-AMDGPUDevice::~AMDGPUDevice() {
- mHWBits.clear();
- mSWBits.clear();
-}
-
-size_t AMDGPUDevice::getMaxGDSSize() const {
- return 0;
-}
-
-uint32_t
-AMDGPUDevice::getDeviceFlag() const {
- return DeviceFlag;
-}
-
-size_t AMDGPUDevice::getMaxNumCBs() const {
- if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) {
- return HW_MAX_NUM_CB;
- }
-
- return 0;
-}
-
-size_t AMDGPUDevice::getMaxCBSize() const {
- if (usesHardware(AMDGPUDeviceInfo::ConstantMem)) {
- return MAX_CB_SIZE;
- }
-
- return 0;
-}
-
-size_t AMDGPUDevice::getMaxScratchSize() const {
- return 65536;
-}
-
-uint32_t AMDGPUDevice::getStackAlignment() const {
- return 16;
-}
-
-void AMDGPUDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::HalfOps);
- mSWBits.set(AMDGPUDeviceInfo::ByteOps);
- mSWBits.set(AMDGPUDeviceInfo::ShortOps);
- mSWBits.set(AMDGPUDeviceInfo::HW64BitDivMod);
- if (mSTM->isOverride(AMDGPUDeviceInfo::NoInline)) {
- mSWBits.set(AMDGPUDeviceInfo::NoInline);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::MacroDB)) {
- mSWBits.set(AMDGPUDeviceInfo::MacroDB);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) {
- mSWBits.set(AMDGPUDeviceInfo::ConstantMem);
- } else {
- mHWBits.set(AMDGPUDeviceInfo::ConstantMem);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) {
- mSWBits.set(AMDGPUDeviceInfo::PrivateMem);
- } else {
- mHWBits.set(AMDGPUDeviceInfo::PrivateMem);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::BarrierDetect)) {
- mSWBits.set(AMDGPUDeviceInfo::BarrierDetect);
- }
- mSWBits.set(AMDGPUDeviceInfo::ByteLDSOps);
- mSWBits.set(AMDGPUDeviceInfo::LongOps);
-}
-
-AMDGPUDeviceInfo::ExecutionMode
-AMDGPUDevice::getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const {
- if (mHWBits[Caps]) {
- assert(!mSWBits[Caps] && "Cannot set both SW and HW caps");
- return AMDGPUDeviceInfo::Hardware;
- }
-
- if (mSWBits[Caps]) {
- assert(!mHWBits[Caps] && "Cannot set both SW and HW caps");
- return AMDGPUDeviceInfo::Software;
- }
-
- return AMDGPUDeviceInfo::Unsupported;
-
-}
-
-bool AMDGPUDevice::isSupported(AMDGPUDeviceInfo::Caps Mode) const {
- return getExecutionMode(Mode) != AMDGPUDeviceInfo::Unsupported;
-}
-
-bool AMDGPUDevice::usesHardware(AMDGPUDeviceInfo::Caps Mode) const {
- return getExecutionMode(Mode) == AMDGPUDeviceInfo::Hardware;
-}
-
-bool AMDGPUDevice::usesSoftware(AMDGPUDeviceInfo::Caps Mode) const {
- return getExecutionMode(Mode) == AMDGPUDeviceInfo::Software;
-}
-
-std::string
-AMDGPUDevice::getDataLayout() const {
- std::string DataLayout = std::string(
- "e"
- "-p:32:32:32"
- "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128"
- "-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048"
- "-n32:64"
- );
-
- if (usesHardware(AMDGPUDeviceInfo::DoubleOps)) {
- DataLayout.append("-f64:64:64");
- }
-
- return DataLayout;
-}
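
[Editor's note] The HW/SW bit-vector scheme implemented by setCaps and getExecutionMode in the deleted file keeps one invariant: a capability may be hardware-backed or software-emulated, never both. A compact stand-alone sketch of the same model, with toy enum values standing in for the deleted AMDGPUDeviceInfo definitions:

#include <bitset>
#include <cassert>
#include <cstdio>

enum Caps { HalfOps, DoubleOps, ConstantMem, MaxCaps };
enum ExecutionMode { Unsupported, Software, Hardware };

struct ToyDevice {
  std::bitset<MaxCaps> HWBits, SWBits;

  ExecutionMode getExecutionMode(Caps C) const {
    // The invariant the deleted code asserted on every query.
    assert(!(HWBits[C] && SWBits[C]) && "Cannot set both SW and HW caps");
    if (HWBits[C]) return Hardware;
    if (SWBits[C]) return Software;
    return Unsupported;
  }
  bool usesHardware(Caps C) const { return getExecutionMode(C) == Hardware; }
};

int main() {
  ToyDevice D;
  D.HWBits.set(ConstantMem); // hardware constant buffers
  D.SWBits.set(HalfOps);     // half floats emulated in software
  std::printf("%d %d\n", D.usesHardware(ConstantMem), D.usesHardware(HalfOps));
}
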
diff --git a/lib/Target/R600/AMDILDevice.h b/lib/Target/R600/AMDILDevice.h
deleted file mode 100644
index 97df98c..0000000
--- a/lib/Target/R600/AMDILDevice.h
+++ /dev/null
@@ -1,117 +0,0 @@
-//===---- AMDILDevice.h - Define Device Data for AMDGPU -----*- C++ -*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the subtarget data classes.
-//
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===----------------------------------------------------------------------===//
-#ifndef AMDILDEVICEIMPL_H
-#define AMDILDEVICEIMPL_H
-#include "AMDIL.h"
-#include "llvm/ADT/BitVector.h"
-
-namespace llvm {
- class AMDGPUSubtarget;
- class MCStreamer;
-//===----------------------------------------------------------------------===//
-// Interface for data that is specific to a single device
-//===----------------------------------------------------------------------===//
-class AMDGPUDevice {
-public:
- AMDGPUDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUDevice();
-
- // Enum values for the various memory types.
- enum {
- RAW_UAV_ID = 0,
- ARENA_UAV_ID = 1,
- LDS_ID = 2,
- GDS_ID = 3,
- SCRATCH_ID = 4,
- CONSTANT_ID = 5,
- GLOBAL_ID = 6,
- MAX_IDS = 7
- } IO_TYPE_IDS;
-
- /// \returns The max LDS size that the hardware supports. Size is in
- /// bytes.
- virtual size_t getMaxLDSSize() const = 0;
-
- /// \returns The max GDS size that the hardware supports if the GDS is
- /// supported by the hardware. Size is in bytes.
- virtual size_t getMaxGDSSize() const;
-
- /// \returns The max number of hardware constant address spaces that
- /// are supported by this device.
- virtual size_t getMaxNumCBs() const;
-
- /// \returns The max number of bytes a single hardware constant buffer
- /// can support. Size is in bytes.
- virtual size_t getMaxCBSize() const;
-
- /// \returns The max number of bytes allowed by the hardware scratch
- /// buffer. Size is in bytes.
- virtual size_t getMaxScratchSize() const;
-
- /// \brief Get the flag that corresponds to the device.
- virtual uint32_t getDeviceFlag() const;
-
- /// \returns The number of work-items that exist in a single hardware
- /// wavefront.
- virtual size_t getWavefrontSize() const = 0;
-
- /// \brief Get the generational name of this specific device.
- virtual uint32_t getGeneration() const = 0;
-
- /// \brief Get the stack alignment of this specific device.
- virtual uint32_t getStackAlignment() const;
-
- /// \brief Get the resource ID for this specific device.
- virtual uint32_t getResourceID(uint32_t DeviceID) const = 0;
-
- /// \brief Get the max number of UAV's for this device.
- virtual uint32_t getMaxNumUAVs() const = 0;
-
-
- // API utilizing more detailed capabilities of each family of
- // cards. If a capability is supported, then either usesHardware or
- // usesSoftware returns true. If usesHardware returns true, then
- // usesSoftware must return false for the same capability. Hardware
- // execution means that the feature is done natively by the hardware
- // and is not emulated by software. Software execution means
- // that the feature could be done in the hardware, but there is
- // software that emulates it, possibly using the hardware for
- // support, since the hardware does not fully comply with the OpenCL
- // spec.
-
- bool isSupported(AMDGPUDeviceInfo::Caps Mode) const;
- bool usesHardware(AMDGPUDeviceInfo::Caps Mode) const;
- bool usesSoftware(AMDGPUDeviceInfo::Caps Mode) const;
- virtual std::string getDataLayout() const;
- static const unsigned int MAX_LDS_SIZE_700 = 16384;
- static const unsigned int MAX_LDS_SIZE_800 = 32768;
- static const unsigned int WavefrontSize = 64;
- static const unsigned int HalfWavefrontSize = 32;
- static const unsigned int QuarterWavefrontSize = 16;
-protected:
- virtual void setCaps();
- BitVector mHWBits;
- llvm::BitVector mSWBits;
- AMDGPUSubtarget *mSTM;
- uint32_t DeviceFlag;
-private:
- AMDGPUDeviceInfo::ExecutionMode
- getExecutionMode(AMDGPUDeviceInfo::Caps Caps) const;
-};
-
-} // namespace llvm
-#endif // AMDILDEVICEIMPL_H
diff --git a/lib/Target/R600/AMDILDeviceInfo.cpp b/lib/Target/R600/AMDILDeviceInfo.cpp
deleted file mode 100644
index 126514b..0000000
--- a/lib/Target/R600/AMDILDeviceInfo.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- AMDILDeviceInfo.cpp - AMDILDeviceInfo class -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Function that creates DeviceInfo from a device name and other information.
-//
-//==-----------------------------------------------------------------------===//
-#include "AMDILDevices.h"
-#include "AMDGPUSubtarget.h"
-
-using namespace llvm;
-namespace llvm {
-namespace AMDGPUDeviceInfo {
-
-AMDGPUDevice* getDeviceFromName(const std::string &deviceName,
- AMDGPUSubtarget *ptr,
- bool is64bit, bool is64on32bit) {
- if (deviceName.c_str()[2] == '7') {
- switch (deviceName.c_str()[3]) {
- case '1':
- return new AMDGPU710Device(ptr);
- case '7':
- return new AMDGPU770Device(ptr);
- default:
- return new AMDGPU7XXDevice(ptr);
- }
- } else if (deviceName == "cypress") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUCypressDevice(ptr);
- } else if (deviceName == "juniper") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUEvergreenDevice(ptr);
- } else if (deviceName == "redwood" || deviceName == "sumo") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPURedwoodDevice(ptr);
- } else if (deviceName == "cedar") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUCedarDevice(ptr);
- } else if (deviceName == "barts" || deviceName == "turks") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUNIDevice(ptr);
- } else if (deviceName == "cayman") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUCaymanDevice(ptr);
- } else if (deviceName == "caicos") {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPUNIDevice(ptr);
- } else if (deviceName == "SI" ||
- deviceName == "tahiti" || deviceName == "pitcairn" ||
- deviceName == "verde" || deviceName == "oland" ||
- deviceName == "hainan") {
- return new AMDGPUSIDevice(ptr);
- } else {
-#if DEBUG
- assert(!is64bit && "This device does not support 64bit pointers!");
- assert(!is64on32bit && "This device does not support 64bit"
- " on 32bit pointers!");
-#endif
- return new AMDGPU7XXDevice(ptr);
- }
-}
-} // End namespace AMDGPUDeviceInfo
-} // End namespace llvm
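
[Editor's note] getDeviceFromName above resolves a subtarget name to a device object through a long if/else chain; a table-driven factory is the usual tidier shape for this. A hedged sketch, with invented class names standing in for the deleted AMDGPU*Device hierarchy:

#include <functional>
#include <map>
#include <memory>
#include <string>

struct ToyDevice { virtual ~ToyDevice() = default; };
struct ToyEvergreen : ToyDevice {};
struct ToyNI : ToyDevice {};
struct ToySI : ToyDevice {};

std::unique_ptr<ToyDevice> deviceFromName(const std::string &Name) {
  // One entry per device name; unknown names fall back to null here,
  // where the deleted code fell back to a default 7XX device.
  static const std::map<std::string, std::function<ToyDevice *()>> Factory = {
      {"juniper", [] { return new ToyEvergreen(); }},
      {"barts", [] { return new ToyNI(); }},
      {"tahiti", [] { return new ToySI(); }},
  };
  auto It = Factory.find(Name);
  return std::unique_ptr<ToyDevice>(It == Factory.end() ? nullptr : It->second());
}
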
diff --git a/lib/Target/R600/AMDILDeviceInfo.h b/lib/Target/R600/AMDILDeviceInfo.h
deleted file mode 100644
index 4b2c3a5..0000000
--- a/lib/Target/R600/AMDILDeviceInfo.h
+++ /dev/null
@@ -1,88 +0,0 @@
-//===-- AMDILDeviceInfo.h - Constants for describing devices --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#ifndef AMDILDEVICEINFO_H
-#define AMDILDEVICEINFO_H
-
-
-#include <string>
-
-namespace llvm {
- class AMDGPUDevice;
- class AMDGPUSubtarget;
- namespace AMDGPUDeviceInfo {
- /// Each capability can be executed using a hardware instruction,
- /// emulated with a sequence of software instructions, or not
- /// supported at all.
- enum ExecutionMode {
- Unsupported = 0, ///< Unsupported feature on the card (default value).
- /// This is the execution mode that is set if the feature is emulated in
- /// software.
- Software,
- /// This execution mode is set if the feature exists natively in hardware
- Hardware
- };
-
- enum Caps {
- HalfOps = 0x1, ///< Half float is supported or not.
- DoubleOps = 0x2, ///< Double is supported or not.
- ByteOps = 0x3, ///< Byte (char) is supported or not.
- ShortOps = 0x4, ///< Short is supported or not.
- LongOps = 0x5, ///< Long is supported or not.
- Images = 0x6, ///< Images are supported or not.
- ByteStores = 0x7, ///< ByteStores available (!HD4XXX).
- ConstantMem = 0x8, ///< Constant/CB memory.
- LocalMem = 0x9, ///< Local/LDS memory.
- PrivateMem = 0xA, ///< Scratch/Private/Stack memory.
- RegionMem = 0xB, ///< OCL GDS Memory Extension.
- FMA = 0xC, ///< Use HW FMA or SW FMA.
- ArenaSegment = 0xD, ///< Use for Arena UAV per pointer 12-1023.
- MultiUAV = 0xE, ///< Use for UAV per Pointer 0-7.
- Reserved0 = 0xF, ///< ReservedFlag
- NoAlias = 0x10, ///< Cached loads.
- Signed24BitOps = 0x11, ///< Peephole Optimization.
- /// Debug mode implies that no hardware features or optimizations
- /// are performed and that all memory accesses go through a single
- /// uav (Arena on HD5XXX/HD6XXX and Raw on HD4XXX).
- Debug = 0x12,
- CachedMem = 0x13, ///< Cached mem is available or not.
- BarrierDetect = 0x14, ///< Detect duplicate barriers.
- Reserved1 = 0x15, ///< Reserved flag
- ByteLDSOps = 0x16, ///< Flag to specify if byte LDS ops are available.
- ArenaVectors = 0x17, ///< Flag to specify if vector loads from arena work.
- TmrReg = 0x18, ///< Flag to specify if Tmr register is supported.
- NoInline = 0x19, ///< Flag to specify that no inlining should occur.
- MacroDB = 0x1A, ///< Flag to specify that backend handles macrodb.
- HW64BitDivMod = 0x1B, ///< Flag for backend to generate 64bit div/mod.
- ArenaUAV = 0x1C, ///< Flag to specify that arena uav is supported.
- PrivateUAV = 0x1D, ///< Flag to specify that private memory uses uav's.
- /// If more capabilities are required, then
- /// this number needs to be increased.
- /// All capabilities must come before this
- /// number.
- MaxNumberCapabilities = 0x20
- };
- /// These have to be in order, with the older generations
- /// having the lower enumeration values.
- enum Generation {
- HD4XXX = 0, ///< 7XX based devices.
- HD5XXX, ///< Evergreen based devices.
- HD6XXX, ///< NI/Evergreen+ based devices.
- HD7XXX, ///< Southern Islands based devices.
- HDTEST, ///< Experimental feature testing device.
- HDNUMGEN
- };
-
-
- AMDGPUDevice*
- getDeviceFromName(const std::string &name, AMDGPUSubtarget *ptr,
- bool is64bit = false, bool is64on32bit = false);
- } // namespace AMDGPUDeviceInfo
-} // namespace llvm
-#endif // AMDILDEVICEINFO_H
diff --git a/lib/Target/R600/AMDILDevices.h b/lib/Target/R600/AMDILDevices.h
deleted file mode 100644
index 636fa6d..0000000
--- a/lib/Target/R600/AMDILDevices.h
+++ /dev/null
@@ -1,19 +0,0 @@
-//===-- AMDILDevices.h - Consolidate AMDIL Device headers -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#ifndef AMDIL_DEVICES_H
-#define AMDIL_DEVICES_H
-// Include all of the device specific header files
-#include "AMDIL7XXDevice.h"
-#include "AMDILDevice.h"
-#include "AMDILEvergreenDevice.h"
-#include "AMDILNIDevice.h"
-#include "AMDILSIDevice.h"
-
-#endif // AMDIL_DEVICES_H
diff --git a/lib/Target/R600/AMDILEvergreenDevice.cpp b/lib/Target/R600/AMDILEvergreenDevice.cpp
deleted file mode 100644
index c5213a0..0000000
--- a/lib/Target/R600/AMDILEvergreenDevice.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-//===-- AMDILEvergreenDevice.cpp - Device Info for Evergreen --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILEvergreenDevice.h"
-
-using namespace llvm;
-
-AMDGPUEvergreenDevice::AMDGPUEvergreenDevice(AMDGPUSubtarget *ST)
-: AMDGPUDevice(ST) {
- setCaps();
- std::string name = ST->getDeviceName();
- if (name == "cedar") {
- DeviceFlag = OCL_DEVICE_CEDAR;
- } else if (name == "redwood") {
- DeviceFlag = OCL_DEVICE_REDWOOD;
- } else if (name == "cypress") {
- DeviceFlag = OCL_DEVICE_CYPRESS;
- } else {
- DeviceFlag = OCL_DEVICE_JUNIPER;
- }
-}
-
-AMDGPUEvergreenDevice::~AMDGPUEvergreenDevice() {
-}
-
-size_t AMDGPUEvergreenDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_800;
- } else {
- return 0;
- }
-}
-size_t AMDGPUEvergreenDevice::getMaxGDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::RegionMem)) {
- return MAX_LDS_SIZE_800;
- } else {
- return 0;
- }
-}
-uint32_t AMDGPUEvergreenDevice::getMaxNumUAVs() const {
- return 12;
-}
-
-uint32_t AMDGPUEvergreenDevice::getResourceID(uint32_t id) const {
- switch(id) {
- default:
- assert(0 && "ID type passed in is unknown!");
- break;
- case CONSTANT_ID:
- case RAW_UAV_ID:
- return GLOBAL_RETURN_RAW_UAV_ID;
- case GLOBAL_ID:
- case ARENA_UAV_ID:
- return DEFAULT_ARENA_UAV_ID;
- case LDS_ID:
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return DEFAULT_LDS_ID;
- } else {
- return DEFAULT_ARENA_UAV_ID;
- }
- case GDS_ID:
- if (usesHardware(AMDGPUDeviceInfo::RegionMem)) {
- return DEFAULT_GDS_ID;
- } else {
- return DEFAULT_ARENA_UAV_ID;
- }
- case SCRATCH_ID:
- if (usesHardware(AMDGPUDeviceInfo::PrivateMem)) {
- return DEFAULT_SCRATCH_ID;
- } else {
- return DEFAULT_ARENA_UAV_ID;
- }
- };
- return 0;
-}
-
-size_t AMDGPUEvergreenDevice::getWavefrontSize() const {
- return AMDGPUDevice::WavefrontSize;
-}
-
-uint32_t AMDGPUEvergreenDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD5XXX;
-}
-
-void AMDGPUEvergreenDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::ArenaSegment);
- mHWBits.set(AMDGPUDeviceInfo::ArenaUAV);
- mHWBits.set(AMDGPUDeviceInfo::HW64BitDivMod);
- mSWBits.reset(AMDGPUDeviceInfo::HW64BitDivMod);
- mSWBits.set(AMDGPUDeviceInfo::Signed24BitOps);
- if (mSTM->isOverride(AMDGPUDeviceInfo::ByteStores)) {
- mHWBits.set(AMDGPUDeviceInfo::ByteStores);
- }
- if (mSTM->isOverride(AMDGPUDeviceInfo::Debug)) {
- mSWBits.set(AMDGPUDeviceInfo::LocalMem);
- mSWBits.set(AMDGPUDeviceInfo::RegionMem);
- } else {
- mHWBits.set(AMDGPUDeviceInfo::LocalMem);
- mHWBits.set(AMDGPUDeviceInfo::RegionMem);
- }
- mHWBits.set(AMDGPUDeviceInfo::Images);
- if (mSTM->isOverride(AMDGPUDeviceInfo::NoAlias)) {
- mHWBits.set(AMDGPUDeviceInfo::NoAlias);
- }
- mHWBits.set(AMDGPUDeviceInfo::CachedMem);
- if (mSTM->isOverride(AMDGPUDeviceInfo::MultiUAV)) {
- mHWBits.set(AMDGPUDeviceInfo::MultiUAV);
- }
- mHWBits.set(AMDGPUDeviceInfo::ByteLDSOps);
- mSWBits.reset(AMDGPUDeviceInfo::ByteLDSOps);
- mHWBits.set(AMDGPUDeviceInfo::ArenaVectors);
- mHWBits.set(AMDGPUDeviceInfo::LongOps);
- mSWBits.reset(AMDGPUDeviceInfo::LongOps);
- mHWBits.set(AMDGPUDeviceInfo::TmrReg);
-}
-
-AMDGPUCypressDevice::AMDGPUCypressDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- setCaps();
-}
-
-AMDGPUCypressDevice::~AMDGPUCypressDevice() {
-}
-
-void AMDGPUCypressDevice::setCaps() {
- if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
- mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
- mHWBits.set(AMDGPUDeviceInfo::FMA);
- }
-}
-
-
-AMDGPUCedarDevice::AMDGPUCedarDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- setCaps();
-}
-
-AMDGPUCedarDevice::~AMDGPUCedarDevice() {
-}
-
-void AMDGPUCedarDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::FMA);
-}
-
-size_t AMDGPUCedarDevice::getWavefrontSize() const {
- return AMDGPUDevice::QuarterWavefrontSize;
-}
-
-AMDGPURedwoodDevice::AMDGPURedwoodDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- setCaps();
-}
-
-AMDGPURedwoodDevice::~AMDGPURedwoodDevice() {
-}
-
-void AMDGPURedwoodDevice::setCaps() {
- mSWBits.set(AMDGPUDeviceInfo::FMA);
-}
-
-size_t AMDGPURedwoodDevice::getWavefrontSize() const {
- return AMDGPUDevice::HalfWavefrontSize;
-}
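
[Editor's note] A pattern worth noting in getResourceID above: every memory space that lacks dedicated hardware support degrades to the arena UAV. A condensed sketch of that fallback rule; the constants are invented for illustration, the real IDs come from the AMDIL headers:

enum ToyIOType { ToyLDS, ToyGDS, ToyScratch };
enum { TOY_LDS_ID = 1, TOY_GDS_ID = 2, TOY_SCRATCH_ID = 3, TOY_ARENA_ID = 8 };

// Dedicated resource when the capability is in hardware, arena otherwise.
unsigned toyResourceID(ToyIOType T, bool HasLocal, bool HasRegion,
                       bool HasPrivate) {
  switch (T) {
  case ToyLDS: return HasLocal ? TOY_LDS_ID : TOY_ARENA_ID;
  case ToyGDS: return HasRegion ? TOY_GDS_ID : TOY_ARENA_ID;
  case ToyScratch: return HasPrivate ? TOY_SCRATCH_ID : TOY_ARENA_ID;
  }
  return TOY_ARENA_ID;
}
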
diff --git a/lib/Target/R600/AMDILEvergreenDevice.h b/lib/Target/R600/AMDILEvergreenDevice.h
deleted file mode 100644
index ea90f77..0000000
--- a/lib/Target/R600/AMDILEvergreenDevice.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//==- AMDILEvergreenDevice.h - Define Evergreen Device for AMDIL -*- C++ -*--=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===----------------------------------------------------------------------===//
-#ifndef AMDILEVERGREENDEVICE_H
-#define AMDILEVERGREENDEVICE_H
-#include "AMDGPUSubtarget.h"
-#include "AMDILDevice.h"
-
-namespace llvm {
- class AMDGPUSubtarget;
-//===----------------------------------------------------------------------===//
-// Evergreen generation of devices and their respective sub classes
-//===----------------------------------------------------------------------===//
-
-
-/// \brief The AMDGPUEvergreenDevice is the base device class for all of the Evergreen
-/// series of cards.
-///
-/// This class contains information required to differentiate
-/// the Evergreen device from the generic AMDGPUDevice. This device represents
-/// the capabilities of the 'Juniper' cards, also known as the HD57XX.
-class AMDGPUEvergreenDevice : public AMDGPUDevice {
-public:
- AMDGPUEvergreenDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUEvergreenDevice();
- virtual size_t getMaxLDSSize() const;
- virtual size_t getMaxGDSSize() const;
- virtual size_t getWavefrontSize() const;
- virtual uint32_t getGeneration() const;
- virtual uint32_t getMaxNumUAVs() const;
- virtual uint32_t getResourceID(uint32_t) const;
-protected:
- virtual void setCaps();
-};
-
-/// The AMDGPUCypressDevice is similar to the AMDGPUEvergreenDevice, except it has
-/// support for double precision operations. This device is used to represent
-/// both the Cypress and Hemlock cards, which are commercially known as HD58XX
-/// and HD59XX cards.
-class AMDGPUCypressDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUCypressDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUCypressDevice();
-private:
- virtual void setCaps();
-};
-
-
-/// \brief The AMDGPUCedarDevice is the class that represents all of the 'Cedar' based
-/// devices.
-///
-/// This class differs from the base AMDGPUEvergreenDevice in that the
-/// device is a ~quarter of the 'Juniper'. These are commercially known as the
-/// HD54XX and HD53XX series of cards.
-class AMDGPUCedarDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUCedarDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPUCedarDevice();
- virtual size_t getWavefrontSize() const;
-private:
- virtual void setCaps();
-};
-
-/// \brief The AMDGPURedwoodDevice is the class that represents all of the 'Redwood' based
-/// devices.
-///
-/// This class differs from the base class, in that these devices are
-/// considered about half of a 'Juniper' device. These are commercially known as
-/// the HD55XX and HD56XX series of cards.
-class AMDGPURedwoodDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPURedwoodDevice(AMDGPUSubtarget *ST);
- virtual ~AMDGPURedwoodDevice();
- virtual size_t getWavefrontSize() const;
-private:
- virtual void setCaps();
-};
-
-} // namespace llvm
-#endif // AMDILEVERGREENDEVICE_H
diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
deleted file mode 100644
index ba75a44..0000000
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ /dev/null
@@ -1,666 +0,0 @@
-//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Defines an instruction selector for the AMDGPU target.
-//
-//===----------------------------------------------------------------------===//
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUISelLowering.h" // For AMDGPUISD
-#include "AMDGPURegisterInfo.h"
-#include "AMDILDevices.h"
-#include "R600InstrInfo.h"
-#include "SIISelLowering.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include <list>
-#include <queue>
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-/// AMDGPU specific code to select AMDGPU machine instructions for
-/// SelectionDAG operations.
-class AMDGPUDAGToDAGISel : public SelectionDAGISel {
- // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
- // make the right decision when generating code for different targets.
- const AMDGPUSubtarget &Subtarget;
-public:
- AMDGPUDAGToDAGISel(TargetMachine &TM);
- virtual ~AMDGPUDAGToDAGISel();
-
- SDNode *Select(SDNode *N);
- virtual const char *getPassName() const;
- virtual void PostprocessISelDAG();
-
-private:
- inline SDValue getSmallIPtrImm(unsigned Imm);
- bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
-
- // Complex pattern selectors
- bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
- bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
- bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
-
- static bool checkType(const Value *ptr, unsigned int addrspace);
- static const Value *getBasePointerValue(const Value *V);
-
- static bool isGlobalStore(const StoreSDNode *N);
- static bool isPrivateStore(const StoreSDNode *N);
- static bool isLocalStore(const StoreSDNode *N);
- static bool isRegionStore(const StoreSDNode *N);
-
- static bool isCPLoad(const LoadSDNode *N);
- static bool isConstantLoad(const LoadSDNode *N, int cbID);
- static bool isGlobalLoad(const LoadSDNode *N);
- static bool isParamLoad(const LoadSDNode *N);
- static bool isPrivateLoad(const LoadSDNode *N);
- static bool isLocalLoad(const LoadSDNode *N);
- static bool isRegionLoad(const LoadSDNode *N);
-
- bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
- bool SelectGlobalValueVariableOffset(SDValue Addr,
- SDValue &BaseReg, SDValue& Offset);
- bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
- bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
-
- // Include the pieces autogenerated from the target description.
-#include "AMDGPUGenDAGISel.inc"
-};
-} // end anonymous namespace
-
-/// \brief This pass converts a legalized DAG into an AMDGPU-specific
-/// DAG, ready for instruction scheduling.
-FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
- ) {
- return new AMDGPUDAGToDAGISel(TM);
-}
-
-AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM
- )
- : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
-}
-
-AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
-}
-
-SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRParam(
- SDValue Addr, SDValue& R1, SDValue& R2) {
-
- if (Addr.getOpcode() == ISD::FrameIndex) {
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
- }
- } else if (Addr.getOpcode() == ISD::ADD) {
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i32);
- }
- return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
- if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress) {
- return false;
- }
- return SelectADDRParam(Addr, R1, R2);
-}
-
-
-bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
- if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress) {
- return false;
- }
-
- if (Addr.getOpcode() == ISD::FrameIndex) {
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
- }
- } else if (Addr.getOpcode() == ISD::ADD) {
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, MVT::i64);
- }
- return true;
-}
-
-SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
- unsigned int Opc = N->getOpcode();
- if (N->isMachineOpcode()) {
- return NULL; // Already selected.
- }
- switch (Opc) {
- default: break;
- case ISD::BUILD_VECTOR: {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
- break;
- }
- // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
- // that adds a 128-bit reg copy when going through the TwoAddressInstruction
- // pass. We want to avoid 128-bit copies as much as possible because they
- // can't be bundled by our scheduler.
- SDValue RegSeqArgs[9] = {
- CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
- SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
- };
- bool IsRegSeq = true;
- for (unsigned i = 0; i < N->getNumOperands(); i++) {
- if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
- IsRegSeq = false;
- break;
- }
- RegSeqArgs[2 * i + 1] = N->getOperand(i);
- }
- if (!IsRegSeq)
- break;
- return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
- RegSeqArgs, 2 * N->getNumOperands() + 1);
- }
- case ISD::BUILD_PAIR: {
- SDValue RC, SubReg0, SubReg1;
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- break;
- }
- if (N->getValueType(0) == MVT::i128) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
- SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
- SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
- } else if (N->getValueType(0) == MVT::i64) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
- SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
- SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
- } else {
- llvm_unreachable("Unhandled value type for BUILD_PAIR");
- }
- const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
- N->getOperand(1), SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
- N->getDebugLoc(), N->getValueType(0), Ops);
- }
-
- case ISD::ConstantFP:
- case ISD::Constant: {
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- // XXX: Custom immediate lowering not implemented yet. Instead we use
- // pseudo instructions defined in SIInstructions.td
- if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
- break;
- }
- const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
-
- uint64_t ImmValue = 0;
- unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
-
- if (N->getOpcode() == ISD::ConstantFP) {
- // XXX: 64-bit Immediates not supported yet
- assert(N->getValueType(0) != MVT::f64);
-
- ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
- APFloat Value = C->getValueAPF();
- float FloatValue = Value.convertToFloat();
- if (FloatValue == 0.0) {
- ImmReg = AMDGPU::ZERO;
- } else if (FloatValue == 0.5) {
- ImmReg = AMDGPU::HALF;
- } else if (FloatValue == 1.0) {
- ImmReg = AMDGPU::ONE;
- } else {
- ImmValue = Value.bitcastToAPInt().getZExtValue();
- }
- } else {
- // XXX: 64-bit Immediates not supported yet
- assert(N->getValueType(0) != MVT::i64);
-
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
- if (C->getZExtValue() == 0) {
- ImmReg = AMDGPU::ZERO;
- } else if (C->getZExtValue() == 1) {
- ImmReg = AMDGPU::ONE_INT;
- } else {
- ImmValue = C->getZExtValue();
- }
- }
-
- for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
- Use != SDNode::use_end(); Use = Next) {
- Next = llvm::next(Use);
- std::vector<SDValue> Ops;
- for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
- Ops.push_back(Use->getOperand(i));
- }
-
- if (!Use->isMachineOpcode()) {
- if (ImmReg == AMDGPU::ALU_LITERAL_X) {
- // We can only use literal constants (e.g. AMDGPU::ZERO,
- // AMDGPU::ONE, etc) in machine opcodes.
- continue;
- }
- } else {
- if (!TII->isALUInstr(Use->getMachineOpcode()) ||
- (TII->get(Use->getMachineOpcode()).TSFlags &
- R600_InstFlag::VECTOR)) {
- continue;
- }
-
- int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(), R600Operands::IMM);
- assert(ImmIdx != -1);
-
- // subtract one from ImmIdx, because the DST operand is usually index
- // 0 for MachineInstrs, but we have no DST in the Ops vector.
- ImmIdx--;
-
- // Check that we aren't already using an immediate.
- // XXX: It's possible for an instruction to have more than one
- // immediate operand, but this is not supported yet.
- if (ImmReg == AMDGPU::ALU_LITERAL_X) {
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
- assert(C);
-
- if (C->getZExtValue() != 0) {
- // This instruction is already using an immediate.
- continue;
- }
-
- // Set the immediate value
- Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
- }
- }
- // Set the immediate register
- Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
-
- CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
- }
- break;
- }
- }
- SDNode *Result = SelectCode(N);
-
- // Fold operands of selected node
-
- const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo*>(TM.getInstrInfo());
- if (Result && Result->isMachineOpcode() &&
- !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
- && TII->isALUInstr(Result->getMachineOpcode())) {
- // Fold FNEG/FABS/CONST_ADDRESS
- // TODO: ISel can generate multiple MachineInstrs; we need to recursively
- // parse Result.
- bool IsModified = false;
- do {
- std::vector<SDValue> Ops;
- for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
- I != E; ++I)
- Ops.push_back(*I);
- IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
- if (IsModified) {
- Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
- }
- } while (IsModified);
-
- // If node has a single use which is CLAMP_R600, folds it
- if (Result->hasOneUse() && Result->isMachineOpcode()) {
- SDNode *PotentialClamp = *Result->use_begin();
- if (PotentialClamp->isMachineOpcode() &&
- PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
- unsigned ClampIdx =
- TII->getOperandIdx(Result->getMachineOpcode(), R600Operands::CLAMP);
- std::vector<SDValue> Ops;
- unsigned NumOp = Result->getNumOperands();
- for (unsigned i = 0; i < NumOp; ++i) {
- Ops.push_back(Result->getOperand(i));
- }
- Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
- Result = CurDAG->SelectNodeTo(PotentialClamp,
- Result->getMachineOpcode(), PotentialClamp->getVTList(),
- Ops.data(), NumOp);
- }
- }
- }
- }
-
- return Result;
-}
-
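
The ISD::Constant case above (removed in this patch) encodes a small trick: R600 exposes the common float immediates 0.0, 0.5 and 1.0 as free inline registers (ZERO, HALF, ONE), and only other values consume an ALU_LITERAL_X slot. A standalone sketch of that classification, with a hypothetical ImmReg enum standing in for the real register enum:

#include <cstdint>
#include <cstdio>
#include <cstring>

enum ImmReg { IMM_ZERO, IMM_HALF, IMM_ONE, IMM_LITERAL }; // hypothetical names

static ImmReg classifyFPImm(float F, uint32_t &LiteralBits) {
  if (F == 0.0f) return IMM_ZERO;   // hardware register ZERO
  if (F == 0.5f) return IMM_HALF;   // hardware register HALF
  if (F == 1.0f) return IMM_ONE;    // hardware register ONE
  std::memcpy(&LiteralBits, &F, sizeof F); // bit pattern for ALU_LITERAL_X
  return IMM_LITERAL;
}

int main() {
  uint32_t Bits = 0;
  std::printf("%d %d %d %d\n", classifyFPImm(0.0f, Bits),
              classifyFPImm(0.5f, Bits), classifyFPImm(1.0f, Bits),
              classifyFPImm(2.5f, Bits));               // 0 1 2 3
}
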
-bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
- const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
- int OperandIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0),
- TII->getOperandIdx(Opcode, R600Operands::SRC1),
- TII->getOperandIdx(Opcode, R600Operands::SRC2)
- };
- int SelIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL),
- TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL),
- TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL)
- };
- int NegIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG),
- TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG),
- TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG)
- };
- int AbsIdx[] = {
- TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS),
- TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS),
- -1
- };
-
- for (unsigned i = 0; i < 3; i++) {
- if (OperandIdx[i] < 0)
- return false;
- SDValue Operand = Ops[OperandIdx[i] - 1];
- switch (Operand.getOpcode()) {
- case AMDGPUISD::CONST_ADDRESS: {
- SDValue CstOffset;
- if (Operand.getValueType().isVector() ||
- !SelectGlobalValueConstantOffset(Operand.getOperand(0), CstOffset))
- break;
-
- // Gather others constants values
- std::vector<unsigned> Consts;
- for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = OperandIdx[j];
- if (SrcIdx < 0)
- break;
- if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) {
- if (Reg->getReg() == AMDGPU::ALU_CONST) {
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]);
- Consts.push_back(Cst->getZExtValue());
- }
- }
- }
-
- ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
- Consts.push_back(Cst->getZExtValue());
- if (!TII->fitsConstReadLimitations(Consts))
- break;
-
- Ops[OperandIdx[i] - 1] = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32);
- Ops[SelIdx[i] - 1] = CstOffset;
- return true;
- }
- case ISD::FNEG:
- if (NegIdx[i] < 0)
- break;
- Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
- Ops[NegIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
- return true;
- case ISD::FABS:
- if (AbsIdx[i] < 0)
- break;
- Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
- Ops[AbsIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
- return true;
- case ISD::BITCAST:
- Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
- return true;
- default:
- break;
- }
- }
- return false;
-}
-
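
FoldOperands, also removed here, absorbs FNEG and FABS wrappers into the per-source NEG/ABS modifier bits of the R600 ALU encoding when the instruction provides them (the CONST_ADDRESS and BITCAST cases are omitted below). A reduced sketch of the idea, detached from SelectionDAG; the Src bundle and foldModifier helper are illustrative, not the in-tree API:

#include <cstdio>

enum Wrapper { W_FNEG, W_FABS };            // stand-ins for the ISD nodes
struct Src { int Value; bool Neg, Abs; };   // hypothetical operand bundle

// Absorb a wrapper into the source's modifier bit; returns false when the
// instruction has no such bit (mirrors the NegIdx/AbsIdx < 0 checks above).
static bool foldModifier(Src &S, Wrapper W, int Inner, bool HasBit) {
  if (!HasBit)
    return false;
  S.Value = Inner;                          // drop the FNEG/FABS node
  (W == W_FNEG ? S.Neg : S.Abs) = true;     // set the modifier bit instead
  return true;
}

int main() {
  Src S = {7, false, false};
  if (foldModifier(S, W_FNEG, /*Inner=*/3, /*HasBit=*/true))
    std::printf("value=%d neg=%d abs=%d\n", S.Value, S.Neg, S.Abs);
}
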
-bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
- if (!ptr) {
- return false;
- }
- Type *ptrType = ptr->getType();
- return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
-}
-
-const Value * AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V) {
- if (!V) {
- return NULL;
- }
- const Value *ret = NULL;
- ValueMap<const Value *, bool> ValueBitMap;
- std::queue<const Value *, std::list<const Value *> > ValueQueue;
- ValueQueue.push(V);
- while (!ValueQueue.empty()) {
- V = ValueQueue.front();
- if (ValueBitMap.find(V) == ValueBitMap.end()) {
- ValueBitMap[V] = true;
- if (dyn_cast<Argument>(V) && dyn_cast<PointerType>(V->getType())) {
- ret = V;
- break;
- } else if (dyn_cast<GlobalVariable>(V)) {
- ret = V;
- break;
- } else if (dyn_cast<Constant>(V)) {
- const ConstantExpr *CE = dyn_cast<ConstantExpr>(V);
- if (CE) {
- ValueQueue.push(CE->getOperand(0));
- }
- } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- ret = AI;
- break;
- } else if (const Instruction *I = dyn_cast<Instruction>(V)) {
- uint32_t numOps = I->getNumOperands();
- for (uint32_t x = 0; x < numOps; ++x) {
- ValueQueue.push(I->getOperand(x));
- }
- } else {
- assert(!"Found a Value that we didn't know how to handle!");
- }
- }
- ValueQueue.pop();
- }
- return ret;
-}
-
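
getBasePointerValue is a breadth-first worklist walk over the operand graph: each value is visited once, the walk stops at an Argument, GlobalVariable, or AllocaInst, and other instructions simply enqueue their operands. The same traversal over stand-in types, as a compilable sketch:

#include <cstddef>
#include <cstdio>
#include <queue>
#include <set>
#include <vector>

struct Node {
  bool IsRoot;                  // stands in for Argument/GlobalVariable/Alloca
  std::vector<Node *> Operands; // stands in for instruction operands
};

static const Node *findBase(const Node *V) {
  std::set<const Node *> Seen;
  std::queue<const Node *> Work;
  Work.push(V);
  while (!Work.empty()) {
    const Node *N = Work.front();
    Work.pop();
    if (!Seen.insert(N).second)
      continue;                 // already visited (the ValueBitMap check)
    if (N->IsRoot)
      return N;                 // base pointer found
    for (std::size_t i = 0; i < N->Operands.size(); ++i)
      Work.push(N->Operands[i]);
  }
  return 0;                     // no base found
}

int main() {
  Node Base = {true, {}};
  Node Mid = {false, {&Base}};
  std::printf("%d\n", findBase(&Mid) == &Base); // 1
}
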
-bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
- return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
-}
-
-bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) {
- if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) {
- return true;
- }
- MachineMemOperand *MMO = N->getMemOperand();
- const Value *V = MMO->getValue();
- const Value *BV = getBasePointerValue(V);
- if (MMO
- && MMO->getValue()
- && ((V && dyn_cast<GlobalValue>(V))
- || (BV && dyn_cast<GlobalValue>(
- getBasePointerValue(MMO->getValue()))))) {
- return checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS);
- } else {
- return false;
- }
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) {
- return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) {
- MachineMemOperand *MMO = N->getMemOperand();
- if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
- if (MMO) {
- const Value *V = MMO->getValue();
- const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
- if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
- return true;
- }
- }
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) {
- if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
- // Check to make sure we are not a constant pool load or a constant load
- // that is marked as a private load
- if (isCPLoad(N) || isConstantLoad(N, -1)) {
- return false;
- }
- }
- if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
- && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
- return true;
- }
- return false;
-}
-
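
isPrivateLoad classifies by elimination: a load is private only when it is not a constant-pool or constant load and its pointer matches none of the named address spaces. A compact model of that rule (the address-space names below are illustrative, not the real numbering):

#include <cstdio>

enum AddrSpace {                 // illustrative names, not the real enum
  AS_GLOBAL, AS_LOCAL, AS_REGION, AS_CONSTANT, AS_PARAM_D, AS_PARAM_I, AS_OTHER
};

static bool isPrivateLoad(AddrSpace AS, bool IsCPLoad, bool IsConstLoad) {
  if (IsCPLoad || IsConstLoad)
    return false;                // constant-pool/constant loads never qualify
  switch (AS) {
  case AS_LOCAL: case AS_GLOBAL: case AS_REGION:
  case AS_CONSTANT: case AS_PARAM_D: case AS_PARAM_I:
    return false;                // matches a named space, so not private
  default:
    return true;                 // private by elimination
  }
}

int main() { std::printf("%d\n", isPrivateLoad(AS_OTHER, false, false)); } // 1
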
-const char *AMDGPUDAGToDAGISel::getPassName() const {
- return "AMDGPU DAG->DAG Pattern Instruction Selection";
-}
-
-#ifdef DEBUGTMP
-#undef INT64_C
-#endif
-#undef DEBUGTMP
-
-///==== AMDGPU Functions ====///
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
- SDValue& IntPtr) {
- if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
- IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
- return true;
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
- SDValue& BaseReg, SDValue &Offset) {
- if (!dyn_cast<ConstantSDNode>(Addr)) {
- BaseReg = Addr;
- Offset = CurDAG->getIntPtrConstant(0, true);
- return true;
- }
- return false;
-}
-
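
SelectGlobalValueConstantOffset divides the constant byte address by 4 because the constant buffer is addressed in 32-bit slots, while the variable-offset variant selects a base register plus a zero offset. A small sketch of the two paths with stand-in types for SDValue:

#include <cstdint>
#include <cstdio>

struct Addr { bool IsConst; uint64_t Value; };        // stand-in for SDValue
struct Sel  { bool ConstPath; uint64_t DwordIndex; };

static Sel selectGlobalValueOffset(Addr A) {
  if (A.IsConst)
    return {true, A.Value / 4};  // byte address -> 32-bit slot index
  return {false, 0};             // variable path: base register + offset 0
}

int main() {
  Sel S = selectGlobalValueOffset({true, 16});
  std::printf("const=%d slot=%llu\n", S.ConstPath,
              (unsigned long long)S.DwordIndex);      // const=1 slot=4
}
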
-bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
- SDValue &Offset) {
- ConstantSDNode * IMMOffset;
-
- if (Addr.getOpcode() == ISD::ADD
- && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
- && isInt<16>(IMMOffset->getZExtValue())) {
-
- Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
- return true;
- // If the pointer address is constant, we can move it to the offset field.
- } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
- && isInt<16>(IMMOffset->getZExtValue())) {
- Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
- CurDAG->getEntryNode().getDebugLoc(),
- AMDGPU::ZERO, MVT::i32);
- Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
- return true;
- }
-
- // Default case, no offset
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
- SDValue &Offset) {
- ConstantSDNode *C;
-
- if ((C = dyn_cast<ConstantSDNode>(Addr))) {
- Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
- Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
- } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
- (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
- Base = Addr.getOperand(0);
- Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
- } else {
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- }
-
- return true;
-}
-
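
SelectADDRIndirect is a three-way split: a bare constant selects as INDIRECT_BASE_ADDR plus the immediate, an ADD or OR with a constant right-hand side selects as the left operand plus the immediate, and anything else falls back to the whole address with offset zero. The shape of that decision, sketched with simplified node kinds:

#include <cstdio>

enum Shape { CONST_NODE, ADD_WITH_CONST, OTHER };     // simplified node shapes
struct Match { const char *Base; long Offset; };

static Match selectIndirect(Shape S, long C) {
  switch (S) {
  case CONST_NODE:     return {"INDIRECT_BASE_ADDR", C};
  case ADD_WITH_CONST: return {"lhs", C};  // covers ISD::ADD and ISD::OR
  default:             return {"addr", 0}; // fallback: whole address, offset 0
  }
}

int main() {
  Match M = selectIndirect(ADD_WITH_CONST, 8);
  std::printf("%s + %ld\n", M.Base, M.Offset);        // lhs + 8
}
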
-void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
-
- // Go over all selected nodes and try to fold them a bit more
- const AMDGPUTargetLowering& Lowering = ((const AMDGPUTargetLowering&)TLI);
- for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
- E = CurDAG->allnodes_end(); I != E; ++I) {
-
- MachineSDNode *Node = dyn_cast<MachineSDNode>(I);
- if (!Node)
- continue;
-
- SDNode *ResNode = Lowering.PostISelFolding(Node, *CurDAG);
- if (ResNode != Node)
- ReplaceUses(Node, ResNode);
- }
-}
-
diff --git a/lib/Target/R600/AMDILISelLowering.cpp b/lib/Target/R600/AMDILISelLowering.cpp
index 922cac1..970787e 100644
--- a/lib/Target/R600/AMDILISelLowering.cpp
+++ b/lib/Target/R600/AMDILISelLowering.cpp
@@ -15,7 +15,6 @@
#include "AMDGPUISelLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
-#include "AMDILDevices.h"
#include "AMDILIntrinsicInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -40,7 +39,7 @@ using namespace llvm;
// TargetLowering Class Implementation Begins
//===----------------------------------------------------------------------===//
void AMDGPUTargetLowering::InitAMDILLowering() {
- int types[] = {
+ static const int types[] = {
(int)MVT::i8,
(int)MVT::i16,
(int)MVT::i32,
@@ -59,19 +58,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
(int)MVT::v2i64
};
- int IntTypes[] = {
+ static const int IntTypes[] = {
(int)MVT::i8,
(int)MVT::i16,
(int)MVT::i32,
(int)MVT::i64
};
- int FloatTypes[] = {
+ static const int FloatTypes[] = {
(int)MVT::f32,
(int)MVT::f64
};
- int VectorTypes[] = {
+ static const int VectorTypes[] = {
(int)MVT::v2i8,
(int)MVT::v4i8,
(int)MVT::v2i16,
@@ -83,10 +82,10 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
(int)MVT::v2f64,
(int)MVT::v2i64
};
- size_t NumTypes = sizeof(types) / sizeof(*types);
- size_t NumFloatTypes = sizeof(FloatTypes) / sizeof(*FloatTypes);
- size_t NumIntTypes = sizeof(IntTypes) / sizeof(*IntTypes);
- size_t NumVectorTypes = sizeof(VectorTypes) / sizeof(*VectorTypes);
+ const size_t NumTypes = array_lengthof(types);
+ const size_t NumFloatTypes = array_lengthof(FloatTypes);
+ const size_t NumIntTypes = array_lengthof(IntTypes);
+ const size_t NumVectorTypes = array_lengthof(VectorTypes);
const AMDGPUSubtarget &STM = getTargetMachine().getSubtarget<AMDGPUSubtarget>();
// These are the current register classes that are
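
The switch to array_lengthof is more than style: unlike the sizeof(a)/sizeof(*a) idiom it only instantiates for a genuine array, so a pointer that has silently decayed becomes a compile error instead of a wrong element count. A minimal stand-in for the helper:

#include <cstddef>
#include <cstdio>

// Minimal stand-in for llvm::array_lengthof: only instantiable for a real
// array, so a decayed pointer fails to compile rather than miscounting.
template <class T, std::size_t N>
std::size_t array_lengthof(T (&)[N]) { return N; }

int main() {
  static const int Types[] = {1, 2, 3, 4};
  std::printf("%zu\n", array_lengthof(Types));        // 4
  // const int *P = Types; array_lengthof(P);         // would not compile
}
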
@@ -138,8 +137,6 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
- // GPU doesn't have a rotl, rotr, or byteswap instruction
- setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
// GPU doesn't have any counting operators
@@ -158,21 +155,19 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
setOperationAction(ISD::SELECT_CC, VT, Expand);
}
- if (STM.device()->isSupported(AMDGPUDeviceInfo::LongOps)) {
- setOperationAction(ISD::MULHU, MVT::i64, Expand);
- setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
- setOperationAction(ISD::MULHS, MVT::i64, Expand);
- setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
- setOperationAction(ISD::ADD, MVT::v2i64, Expand);
- setOperationAction(ISD::SREM, MVT::v2i64, Expand);
- setOperationAction(ISD::Constant , MVT::i64 , Legal);
- setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
- setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
- setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
- setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
- setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
- }
- if (STM.device()->isSupported(AMDGPUDeviceInfo::DoubleOps)) {
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::v2i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::v2i64, Expand);
+ setOperationAction(ISD::ADD, MVT::v2i64, Expand);
+ setOperationAction(ISD::SREM, MVT::v2i64, Expand);
+ setOperationAction(ISD::Constant , MVT::i64 , Legal);
+ setOperationAction(ISD::SDIV, MVT::v2i64, Expand);
+ setOperationAction(ISD::TRUNCATE, MVT::v2i64, Expand);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Expand);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Expand);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v2i64, Expand);
+ if (STM.hasHWFP64()) {
// we support loading/storing v2f64 but not operations on the type
setOperationAction(ISD::FADD, MVT::v2f64, Expand);
setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
@@ -331,7 +326,7 @@ SDValue
AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const {
SDValue Data = Op.getOperand(0);
VTSDNode *BaseType = cast<VTSDNode>(Op.getOperand(1));
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT DVT = Data.getValueType();
EVT BVT = BaseType->getVT();
unsigned baseBits = BVT.getScalarType().getSizeInBits();
@@ -387,7 +382,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Result;
Result = DAG.getNode(
AMDGPUISD::BRANCH_COND,
- Op.getDebugLoc(),
+ SDLoc(Op),
Op.getValueType(),
Chain, Jump, Cond);
return Result;
@@ -395,7 +390,7 @@ AMDGPUTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
@@ -476,7 +471,7 @@ AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
@@ -547,7 +542,7 @@ AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
MVT INTTY = MVT::i32;
if (OVT == MVT::v2i8) {
@@ -564,7 +559,7 @@ AMDGPUTargetLowering::LowerSREM8(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
MVT INTTY = MVT::i32;
if (OVT == MVT::v2i16) {
@@ -581,7 +576,7 @@ AMDGPUTargetLowering::LowerSREM16(SDValue Op, SelectionDAG &DAG) const {
SDValue
AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT OVT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
diff --git a/lib/Target/R600/AMDILInstrInfo.td b/lib/Target/R600/AMDILInstrInfo.td
index 110f147..0f0c88d 100644
--- a/lib/Target/R600/AMDILInstrInfo.td
+++ b/lib/Target/R600/AMDILInstrInfo.td
@@ -10,63 +10,6 @@
// This file describes the AMDIL instructions in TableGen format.
//
//===----------------------------------------------------------------------===//
-// AMDIL Instruction Predicate Definitions
-// Predicate that is set to true if the hardware supports double precision
-// divide
-def HasHWDDiv : Predicate<"Subtarget.device()"
- "->getGeneration() > AMDGPUDeviceInfo::HD4XXX && "
- "Subtarget.device()->usesHardware(AMDGPUDeviceInfo::DoubleOps)">;
-
-// Predicate that is set to true if the hardware supports double, but not double
-// precision divide in hardware
-def HasSWDDiv : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&"
- "Subtarget.device()->usesHardware(AMDGPUDeviceInfo::DoubleOps)">;
-
-// Predicate that is set to true if the hardware support 24bit signed
-// math ops. Otherwise a software expansion to 32bit math ops is used instead.
-def HasHWSign24Bit : Predicate<"Subtarget.device()"
- "->getGeneration() > AMDGPUDeviceInfo::HD5XXX">;
-
-// Predicate that is set to true if 64bit operations are supported or not
-def HasHW64Bit : Predicate<"Subtarget.device()"
- "->usesHardware(AMDGPUDeviceInfo::LongOps)">;
-def HasSW64Bit : Predicate<"Subtarget.device()"
- "->usesSoftware(AMDGPUDeviceInfo::LongOps)">;
-
-// Predicate that is set to true if the timer register is supported
-def HasTmrRegister : Predicate<"Subtarget.device()"
- "->isSupported(AMDGPUDeviceInfo::TmrReg)">;
-// Predicate that is true if we are at least evergreen series
-def HasDeviceIDInst : Predicate<"Subtarget.device()"
- "->getGeneration() >= AMDGPUDeviceInfo::HD5XXX">;
-
-// Predicate that is true if we have region address space.
-def hasRegionAS : Predicate<"Subtarget.device()"
- "->usesHardware(AMDGPUDeviceInfo::RegionMem)">;
-
-// Predicate that is false if we don't have region address space.
-def noRegionAS : Predicate<"!Subtarget.device()"
- "->isSupported(AMDGPUDeviceInfo::RegionMem)">;
-
-
-// Predicate that is set to true if 64bit Mul is supported in the IL or not
-def HasHW64Mul : Predicate<"Subtarget.calVersion()"
- ">= CAL_VERSION_SC_139"
- "&& Subtarget.device()"
- "->getGeneration() >="
- "AMDGPUDeviceInfo::HD5XXX">;
-def HasSW64Mul : Predicate<"Subtarget.calVersion()"
- "< CAL_VERSION_SC_139">;
-// Predicate that is set to true if 64bit Div/Mod is supported in the IL or not
-def HasHW64DivMod : Predicate<"Subtarget.device()"
- "->usesHardware(AMDGPUDeviceInfo::HW64BitDivMod)">;
-def HasSW64DivMod : Predicate<"Subtarget.device()"
- "->usesSoftware(AMDGPUDeviceInfo::HW64BitDivMod)">;
-
-// Predicate that is set to true if 64bit pointer are used.
-def Has64BitPtr : Predicate<"Subtarget.is64bit()">;
-def Has32BitPtr : Predicate<"!Subtarget.is64bit()">;
//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
@@ -175,15 +118,15 @@ class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
// Multiclass Instruction formats
//===--------------------------------------------------------------------===//
// Multiclass that handles branch instructions
-multiclass BranchConditional<SDNode Op> {
+multiclass BranchConditional<SDNode Op, RegisterClass rci, RegisterClass rcf> {
def _i32 : ILFormat<(outs),
- (ins brtarget:$target, GPRI32:$src0),
+ (ins brtarget:$target, rci:$src0),
"; i32 Pseudo branch instruction",
- [(Op bb:$target, GPRI32:$src0)]>;
+ [(Op bb:$target, (i32 rci:$src0))]>;
def _f32 : ILFormat<(outs),
- (ins brtarget:$target, GPRF32:$src0),
+ (ins brtarget:$target, rcf:$src0),
"; f32 Pseudo branch instruction",
- [(Op bb:$target, GPRF32:$src0)]>;
+ [(Op bb:$target, (f32 rcf:$src0))]>;
}
// Only scalar types should generate flow control
diff --git a/lib/Target/R600/AMDILIntrinsicInfo.cpp b/lib/Target/R600/AMDILIntrinsicInfo.cpp
index 4ddb057..762ee39 100644
--- a/lib/Target/R600/AMDILIntrinsicInfo.cpp
+++ b/lib/Target/R600/AMDILIntrinsicInfo.cpp
@@ -14,7 +14,6 @@
#include "AMDILIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
-#include "AMDIL.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
@@ -50,6 +49,9 @@ AMDGPUIntrinsicInfo::getName(unsigned int IntrID, Type **Tys,
unsigned int
AMDGPUIntrinsicInfo::lookupName(const char *Name, unsigned int Len) const {
+ if (!StringRef(Name, Len).startswith("llvm."))
+ return 0; // All intrinsics start with 'llvm.'
+
#define GET_FUNCTION_RECOGNIZER
#include "AMDGPUGenIntrinsics.inc"
#undef GET_FUNCTION_RECOGNIZER
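
The guard added in this hunk cheaply rejects non-intrinsic names before the generated recognizer runs, relying on the invariant that every intrinsic name starts with "llvm.". The same early-out with the recognizer stubbed out (recognize is a placeholder, not a real API):

#include <cstdio>
#include <cstring>

// Hypothetical stub standing in for the tablegen-generated recognizer.
static unsigned recognize(const char *, unsigned) { return 42; }

static unsigned lookupName(const char *Name, unsigned Len) {
  if (Len < 5 || std::strncmp(Name, "llvm.", 5) != 0)
    return 0;                    // cannot be an intrinsic, skip the recognizer
  return recognize(Name, Len);
}

int main() {
  std::printf("%u %u\n", lookupName("llvm.AMDGPU.mul", 15),
              lookupName("my_function", 11));         // 42 0
}
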
diff --git a/lib/Target/R600/AMDILNIDevice.cpp b/lib/Target/R600/AMDILNIDevice.cpp
deleted file mode 100644
index 47c3f7f..0000000
--- a/lib/Target/R600/AMDILNIDevice.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-//===-- AMDILNIDevice.cpp - Device Info for Northern Islands devices ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILNIDevice.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILEvergreenDevice.h"
-
-using namespace llvm;
-
-AMDGPUNIDevice::AMDGPUNIDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
- std::string name = ST->getDeviceName();
- if (name == "caicos") {
- DeviceFlag = OCL_DEVICE_CAICOS;
- } else if (name == "turks") {
- DeviceFlag = OCL_DEVICE_TURKS;
- } else if (name == "cayman") {
- DeviceFlag = OCL_DEVICE_CAYMAN;
- } else {
- DeviceFlag = OCL_DEVICE_BARTS;
- }
-}
-AMDGPUNIDevice::~AMDGPUNIDevice() {
-}
-
-size_t
-AMDGPUNIDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_900;
- } else {
- return 0;
- }
-}
-
-uint32_t
-AMDGPUNIDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD6XXX;
-}
-
-
-AMDGPUCaymanDevice::AMDGPUCaymanDevice(AMDGPUSubtarget *ST)
- : AMDGPUNIDevice(ST) {
- setCaps();
-}
-
-AMDGPUCaymanDevice::~AMDGPUCaymanDevice() {
-}
-
-void
-AMDGPUCaymanDevice::setCaps() {
- if (mSTM->isOverride(AMDGPUDeviceInfo::DoubleOps)) {
- mHWBits.set(AMDGPUDeviceInfo::DoubleOps);
- mHWBits.set(AMDGPUDeviceInfo::FMA);
- }
- mHWBits.set(AMDGPUDeviceInfo::Signed24BitOps);
- mSWBits.reset(AMDGPUDeviceInfo::Signed24BitOps);
- mSWBits.set(AMDGPUDeviceInfo::ArenaSegment);
-}
-
diff --git a/lib/Target/R600/AMDILNIDevice.h b/lib/Target/R600/AMDILNIDevice.h
deleted file mode 100644
index 24a6408..0000000
--- a/lib/Target/R600/AMDILNIDevice.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===------- AMDILNIDevice.h - Define NI Device for AMDIL -*- C++ -*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===---------------------------------------------------------------------===//
-#ifndef AMDILNIDEVICE_H
-#define AMDILNIDEVICE_H
-#include "AMDGPUSubtarget.h"
-#include "AMDILEvergreenDevice.h"
-
-namespace llvm {
-
-class AMDGPUSubtarget;
-//===---------------------------------------------------------------------===//
-// NI generation of devices and their respective sub classes
-//===---------------------------------------------------------------------===//
-
-/// \brief The AMDGPUNIDevice is the base class for all Northern Island series of
-/// cards.
-///
-/// It is very similiar to the AMDGPUEvergreenDevice, with the major
-/// exception being differences in wavefront size and hardware capabilities. The
-/// NI devices are all 64 wide wavefronts and also add support for signed 24 bit
-/// integer operations
-class AMDGPUNIDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUNIDevice(AMDGPUSubtarget*);
- virtual ~AMDGPUNIDevice();
- virtual size_t getMaxLDSSize() const;
- virtual uint32_t getGeneration() const;
-};
-
-/// Just as the AMDGPUCypressDevice is the double capable version of the
-/// AMDGPUEvergreenDevice, the AMDGPUCaymanDevice is the double capable version
-/// of the AMDGPUNIDevice. The other major difference is that the Cayman Device
-/// has 4 wide ALU's, whereas the rest of the NI family is a 5 wide.
-class AMDGPUCaymanDevice: public AMDGPUNIDevice {
-public:
- AMDGPUCaymanDevice(AMDGPUSubtarget*);
- virtual ~AMDGPUCaymanDevice();
-private:
- virtual void setCaps();
-};
-
-static const unsigned int MAX_LDS_SIZE_900 = AMDGPUDevice::MAX_LDS_SIZE_800;
-} // namespace llvm
-#endif // AMDILNIDEVICE_H
diff --git a/lib/Target/R600/AMDILSIDevice.cpp b/lib/Target/R600/AMDILSIDevice.cpp
deleted file mode 100644
index 0d1de3d..0000000
--- a/lib/Target/R600/AMDILSIDevice.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===-- AMDILSIDevice.cpp - Device Info for Southern Islands GPUs ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-#include "AMDILSIDevice.h"
-#include "AMDGPUSubtarget.h"
-#include "AMDILEvergreenDevice.h"
-#include "AMDILNIDevice.h"
-
-using namespace llvm;
-
-AMDGPUSIDevice::AMDGPUSIDevice(AMDGPUSubtarget *ST)
- : AMDGPUEvergreenDevice(ST) {
-}
-AMDGPUSIDevice::~AMDGPUSIDevice() {
-}
-
-size_t
-AMDGPUSIDevice::getMaxLDSSize() const {
- if (usesHardware(AMDGPUDeviceInfo::LocalMem)) {
- return MAX_LDS_SIZE_900;
- } else {
- return 0;
- }
-}
-
-uint32_t
-AMDGPUSIDevice::getGeneration() const {
- return AMDGPUDeviceInfo::HD7XXX;
-}
-
-std::string
-AMDGPUSIDevice::getDataLayout() const {
- return std::string(
- "e"
- "-p:64:64:64"
- "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64"
- "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128"
- "-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024"
- "-v2048:2048:2048"
- "-n32:64"
- );
-}
diff --git a/lib/Target/R600/AMDILSIDevice.h b/lib/Target/R600/AMDILSIDevice.h
deleted file mode 100644
index 5b2cb25..0000000
--- a/lib/Target/R600/AMDILSIDevice.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===------- AMDILSIDevice.h - Define SI Device for AMDIL -*- C++ -*------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the subtarget data classes.
-///
-/// This file will define the interface that each generation needs to
-/// implement in order to correctly answer queries on the capabilities of the
-/// specific hardware.
-//===---------------------------------------------------------------------===//
-#ifndef AMDILSIDEVICE_H
-#define AMDILSIDEVICE_H
-#include "AMDILEvergreenDevice.h"
-
-namespace llvm {
-class AMDGPUSubtarget;
-//===---------------------------------------------------------------------===//
-// SI generation of devices and their respective sub classes
-//===---------------------------------------------------------------------===//
-
-/// \brief The AMDGPUSIDevice is the base class for all Southern Island series
-/// of cards.
-class AMDGPUSIDevice : public AMDGPUEvergreenDevice {
-public:
- AMDGPUSIDevice(AMDGPUSubtarget*);
- virtual ~AMDGPUSIDevice();
- virtual size_t getMaxLDSSize() const;
- virtual uint32_t getGeneration() const;
- virtual std::string getDataLayout() const;
-};
-
-} // namespace llvm
-#endif // AMDILSIDEVICE_H
diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt
index 97f0a40..9f8f6a8 100644
--- a/lib/Target/R600/CMakeLists.txt
+++ b/lib/Target/R600/CMakeLists.txt
@@ -12,28 +12,22 @@ tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer)
add_public_tablegen_target(AMDGPUCommonTableGen)
add_llvm_target(R600CodeGen
- AMDIL7XXDevice.cpp
AMDILCFGStructurizer.cpp
- AMDILDevice.cpp
- AMDILDeviceInfo.cpp
- AMDILEvergreenDevice.cpp
AMDILIntrinsicInfo.cpp
- AMDILISelDAGToDAG.cpp
AMDILISelLowering.cpp
- AMDILNIDevice.cpp
- AMDILSIDevice.cpp
AMDGPUAsmPrinter.cpp
AMDGPUFrameLowering.cpp
- AMDGPUIndirectAddressing.cpp
+ AMDGPUISelDAGToDAG.cpp
AMDGPUMCInstLower.cpp
AMDGPUMachineFunction.cpp
AMDGPUSubtarget.cpp
- AMDGPUStructurizeCFG.cpp
AMDGPUTargetMachine.cpp
+ AMDGPUTargetTransformInfo.cpp
AMDGPUISelLowering.cpp
AMDGPUConvertToISA.cpp
AMDGPUInstrInfo.cpp
AMDGPURegisterInfo.cpp
+ R600ClauseMergePass.cpp
R600ControlFlowFinalizer.cpp
R600EmitClauseMarkers.cpp
R600ExpandSpecialInstrs.cpp
@@ -41,18 +35,22 @@ add_llvm_target(R600CodeGen
R600ISelLowering.cpp
R600MachineFunctionInfo.cpp
R600MachineScheduler.cpp
+ R600OptimizeVectorRegisters.cpp
R600Packetizer.cpp
R600RegisterInfo.cpp
+ R600TextureIntrinsicsReplacer.cpp
SIAnnotateControlFlow.cpp
+ SIFixSGPRCopies.cpp
SIInsertWaits.cpp
SIInstrInfo.cpp
SIISelLowering.cpp
SILowerControlFlow.cpp
SIMachineFunctionInfo.cpp
SIRegisterInfo.cpp
+ SITypeRewriter.cpp
)
-add_dependencies(LLVMR600CodeGen intrinsics_gen)
+add_dependencies(LLVMR600CodeGen AMDGPUCommonTableGen intrinsics_gen)
add_subdirectory(InstPrinter)
add_subdirectory(TargetInfo)
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
index 303cdf2..99e1377 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
+++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
@@ -10,8 +10,8 @@
#include "AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
using namespace llvm;
@@ -23,6 +23,63 @@ void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
printAnnotation(OS, Annot);
}
+void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O) {
+ switch (reg) {
+ case AMDGPU::VCC:
+ O << "vcc";
+ return;
+ case AMDGPU::SCC:
+ O << "scc";
+ return;
+ case AMDGPU::EXEC:
+ O << "exec";
+ return;
+ case AMDGPU::M0:
+ O << "m0";
+ return;
+ default:
+ break;
+ }
+
+  // There seems to be no way to use SIRegisterInfo here, and dealing with the
+  // giant enum of all the different shifted sets of registers is pretty
+  // unmanageable, so parse the name and reformat it to be prettier.
+ StringRef Name(getRegisterName(reg));
+
+ std::pair<StringRef, StringRef> Split = Name.split('_');
+ StringRef SubRegName = Split.first;
+ StringRef Rest = Split.second;
+
+ if (SubRegName.size() <= 4) { // Must at least be as long as "SGPR"/"VGPR".
+ O << Name;
+ return;
+ }
+
+ unsigned RegIndex;
+ StringRef RegIndexStr = SubRegName.drop_front(4);
+
+ if (RegIndexStr.getAsInteger(10, RegIndex)) {
+ O << Name;
+ return;
+ }
+
+ if (SubRegName.front() == 'V')
+ O << 'v';
+ else if (SubRegName.front() == 'S')
+ O << 's';
+ else {
+ O << Name;
+ return;
+ }
+
+ if (Rest.empty()) // Only 1 32-bit register
+ O << RegIndex;
+ else {
+ unsigned NumReg = Rest.count('_') + 2;
+ O << '[' << RegIndex << ':' << (RegIndex + NumReg - 1) << ']';
+ }
+}
+
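
printRegOperand rewrites tablegen register names into ISA syntax: "VGPR3" becomes "v3", and a tuple such as "SGPR2_SGPR3" becomes "s[2:3]", where the range length is one register per underscore-separated segment. A standalone approximation of that rule (not the in-tree code, which works on StringRef):

#include <algorithm>
#include <cctype>
#include <cstdio>
#include <string>

static std::string prettyReg(const std::string &Name) {
  std::string::size_type Us = Name.find('_');
  std::string First = Name.substr(0, Us);
  if (First.size() <= 4)          // too short for "SGPR"/"VGPR" plus an index
    return Name;
  char C;
  if (First[0] == 'V')      C = 'v';
  else if (First[0] == 'S') C = 's';
  else                      return Name;
  unsigned Index = 0;
  for (std::string::size_type i = 4; i < First.size(); ++i) {
    if (!std::isdigit((unsigned char)First[i]))
      return Name;                // not a numbered register, print verbatim
    Index = Index * 10 + (First[i] - '0');
  }
  if (Us == std::string::npos)    // a single 32-bit register
    return std::string(1, C) + std::to_string(Index);
  unsigned NumReg = (unsigned)std::count(Name.begin(), Name.end(), '_') + 1;
  return std::string(1, C) + '[' + std::to_string(Index) + ':' +
         std::to_string(Index + NumReg - 1) + ']';
}

int main() {
  std::printf("%s %s\n", prettyReg("VGPR3").c_str(),
              prettyReg("SGPR2_SGPR3").c_str());      // v3 s[2:3]
}
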
void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
@@ -30,8 +87,12 @@ void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
switch (Op.getReg()) {
// This is the default predicate state, so we don't need to print it.
- case AMDGPU::PRED_SEL_OFF: break;
- default: O << getRegisterName(Op.getReg()); break;
+ case AMDGPU::PRED_SEL_OFF:
+ break;
+
+ default:
+ printRegOperand(Op.getReg(), O);
+ break;
}
} else if (Op.isImm()) {
O << Op.getImm();
@@ -102,7 +163,7 @@ void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printIfSet(MI, OpNo, O.indent(20 - O.GetNumBytesInBuffer()), "*", " ");
+ printIfSet(MI, OpNo, O.indent(25 - O.GetNumBytesInBuffer()), "*", " ");
}
void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo,
@@ -178,13 +239,13 @@ void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo,
int BankSwizzle = MI->getOperand(OpNo).getImm();
switch (BankSwizzle) {
case 1:
- O << "BS:VEC_021";
+ O << "BS:VEC_021/SCL_122";
break;
case 2:
- O << "BS:VEC_120";
+ O << "BS:VEC_120/SCL_212";
break;
case 3:
- O << "BS:VEC_102";
+ O << "BS:VEC_102/SCL_221";
break;
case 4:
O << "BS:VEC_201";
@@ -198,6 +259,51 @@ void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo,
return;
}
+void AMDGPUInstPrinter::printRSel(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Sel = MI->getOperand(OpNo).getImm();
+ switch (Sel) {
+ case 0:
+ O << "X";
+ break;
+ case 1:
+ O << "Y";
+ break;
+ case 2:
+ O << "Z";
+ break;
+ case 3:
+ O << "W";
+ break;
+ case 4:
+ O << "0";
+ break;
+ case 5:
+ O << "1";
+ break;
+ case 7:
+ O << "_";
+ break;
+ default:
+ break;
+ }
+}
+
+void AMDGPUInstPrinter::printCT(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned CT = MI->getOperand(OpNo).getImm();
+ switch (CT) {
+ case 0:
+ O << "U";
+ break;
+ case 1:
+ O << "N";
+ break;
+ default:
+ break;
+ }
+}
+
void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
int KCacheMode = MI->getOperand(OpNo).getImm();
@@ -210,4 +316,21 @@ void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
}
}
+void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+  // Note: Mask values are taken from SIInsertWaits.cpp and not from the ISA
+  // docs. The bit usage in SIInsertWaits.cpp does not match the ISA
+  // description, but it works, so the docs may contain a misprint.
+ unsigned SImm16 = MI->getOperand(OpNo).getImm();
+ unsigned Vmcnt = SImm16 & 0xF;
+ unsigned Expcnt = (SImm16 >> 4) & 0xF;
+ unsigned Lgkmcnt = (SImm16 >> 8) & 0xF;
+ if (Vmcnt != 0xF)
+ O << "vmcnt(" << Vmcnt << ") ";
+ if (Expcnt != 0x7)
+ O << "expcnt(" << Expcnt << ") ";
+ if (Lgkmcnt != 0x7)
+ O << "lgkmcnt(" << Lgkmcnt << ")";
+}
+
#include "AMDGPUGenAsmWriter.inc"
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
index c6fd053..77af942 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
+++ b/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
@@ -32,6 +32,7 @@ public:
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
private:
+ void printRegOperand(unsigned RegNo, raw_ostream &O);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
@@ -49,7 +50,10 @@ private:
void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
} // End namespace llvm
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
index a3397f3..29d0acf 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -82,6 +82,8 @@ void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//
+namespace {
+
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
public:
ELFAMDGPUAsmBackend(const Target &T) : AMDGPUAsmBackend(T) { }
@@ -91,7 +93,11 @@ public:
}
};
-MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T, StringRef TT,
+} // end anonymous namespace
+
+MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new ELFAMDGPUAsmBackend(T);
}
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 2aae26a..4a8e1b0 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -11,7 +11,7 @@
#include "AMDGPUMCAsmInfo.h"
using namespace llvm;
-AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
+AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() {
HasSingleParameterDotFile = false;
WeakDefDirective = 0;
//===------------------------------------------------------------------===//
@@ -21,7 +21,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
HasStaticCtorDtorReferenceInStaticMode = false;
LinkerRequiresNonEmptyDwarfLines = true;
MaxInstLength = 16;
- PCSymbol = "$";
SeparatorString = "\n";
CommentColumn = 40;
CommentString = ";";
@@ -32,9 +31,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
InlineAsmStart = ";#ASMSTART";
InlineAsmEnd = ";#ASMEND";
AssemblerDialect = 0;
- AllowQuotesInName = false;
- AllowNameToStartWithDigit = false;
- AllowPeriodsInName = false;
//===--- Data Emission Directives -------------------------------------===//
ZeroDirective = ".zero";
@@ -56,13 +52,11 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
//===--- Global Variable Emission Directives --------------------------===//
GlobalDirective = ".global";
- ExternDirective = ".extern";
HasSetDirective = false;
HasAggressiveSymbolFolding = true;
COMMDirectiveAlignmentIsInBytes = false;
HasDotTypeDotSizeDirective = false;
HasNoDeadStrip = true;
- HasSymbolResolver = false;
WeakRefDirective = ".weakref\t";
LinkOnceDirective = 0;
//===--- Dwarf Emission Directives -----------------------------------===//
@@ -70,11 +64,6 @@ AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
SupportsDebugInformation = true;
}
-const char*
-AMDGPUMCAsmInfo::getDataASDirective(unsigned int Size, unsigned int AS) const {
- return 0;
-}
-
const MCSection*
AMDGPUMCAsmInfo::getNonexecutableStackSection(MCContext &CTX) const {
return 0;
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
index 3ad0fa6..22afd63 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -17,13 +17,11 @@
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
-class Target;
class StringRef;
class AMDGPUMCAsmInfo : public MCAsmInfo {
public:
- explicit AMDGPUMCAsmInfo(const Target &T, StringRef &TT);
- const char* getDataASDirective(unsigned int Size, unsigned int AS) const;
+ explicit AMDGPUMCAsmInfo(StringRef &TT);
const MCSection* getNonexecutableStackSection(MCContext &CTX) const;
};
} // namespace llvm
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
new file mode 100644
index 0000000..521b3b3
--- /dev/null
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -0,0 +1,21 @@
+//===-- AMDGPUCodeEmitter.cpp - AMDGPU Code Emitter interface -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief CodeEmitter interface for R600 and SI codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUMCCodeEmitter.h"
+
+using namespace llvm;
+
+// Pin the vtable to this file.
+void AMDGPUMCCodeEmitter::anchor() {}
+
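
The anchor() added here is the usual LLVM idiom for pinning a vtable: give the class one out-of-line virtual function so the compiler emits its vtable (and type info) in exactly one object file instead of in every translation unit that uses the class. Reduced to its essentials:

// Header: one out-of-line virtual member pins vtable emission to one TU.
struct Emitter {
  virtual ~Emitter();
  virtual void anchor();
};

// Emitter.cpp: the vtable (and type info) is emitted with this definition.
Emitter::~Emitter() {}
void Emitter::anchor() {}

int main() {}
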
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
index cd3a7ce..d8cf64a 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
@@ -24,6 +24,7 @@ class MCInst;
class MCOperand;
class AMDGPUMCCodeEmitter : public MCCodeEmitter {
+ virtual void anchor();
public:
uint64_t getBinaryCodeForInstr(const MCInst &MI,
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index 61d70bb..a1bec28 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -88,7 +88,7 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
MCCodeEmitter *_Emitter,
bool RelaxAll,
bool NoExecStack) {
- return createELFStreamer(Ctx, MAB, _OS, _Emitter, false, false);
+ return createELFStreamer(Ctx, 0, MAB, _OS, _Emitter, false, false);
}
extern "C" void LLVMInitializeR600TargetMC() {
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
index abb0320..f6b3376 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -40,8 +40,8 @@ MCCodeEmitter *createSIMCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createAMDGPUAsmBackend(const Target &T, StringRef TT,
- StringRef CPU);
+MCAsmBackend *createAMDGPUAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
MCObjectWriter *createAMDGPUELFObjectWriter(raw_ostream &OS);
} // End llvm namespace
diff --git a/lib/Target/R600/MCTargetDesc/CMakeLists.txt b/lib/Target/R600/MCTargetDesc/CMakeLists.txt
index 3ccdf42..98f6925 100644
--- a/lib/Target/R600/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/R600/MCTargetDesc/CMakeLists.txt
@@ -2,6 +2,7 @@
add_llvm_library(LLVMR600Desc
AMDGPUAsmBackend.cpp
AMDGPUELFObjectWriter.cpp
+ AMDGPUMCCodeEmitter.cpp
AMDGPUMCTargetDesc.cpp
AMDGPUMCAsmInfo.cpp
R600MCCodeEmitter.cpp
diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
index cb4cf0c..dd8df65 100644
--- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -24,7 +24,6 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"
-#include <stdio.h>
using namespace llvm;
@@ -81,21 +80,6 @@ enum FCInstr {
FC_CONTINUE
};
-enum TextureTypes {
- TEXTURE_1D = 1,
- TEXTURE_2D,
- TEXTURE_3D,
- TEXTURE_CUBE,
- TEXTURE_RECT,
- TEXTURE_SHADOW1D,
- TEXTURE_SHADOW2D,
- TEXTURE_SHADOWRECT,
- TEXTURE_1D_ARRAY,
- TEXTURE_2D_ARRAY,
- TEXTURE_SHADOW1D_ARRAY,
- TEXTURE_SHADOW2D_ARRAY
-};
-
MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
@@ -114,69 +98,37 @@ void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
} else if (IS_VTX(Desc)) {
uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
- InstWord2 |= 1 << 19;
+ if (!(STI.getFeatureBits() & AMDGPU::FeatureCaymanISA)) {
+ InstWord2 |= 1 << 19; // Mega-Fetch bit
+ }
Emit(InstWord01, OS);
Emit(InstWord2, OS);
- Emit((u_int32_t) 0, OS);
+ Emit((uint32_t) 0, OS);
} else if (IS_TEX(Desc)) {
- unsigned Opcode = MI.getOpcode();
- bool HasOffsets = (Opcode == AMDGPU::TEX_LD);
- unsigned OpOffset = HasOffsets ? 3 : 0;
- int64_t Sampler = MI.getOperand(OpOffset + 3).getImm();
- int64_t TextureType = MI.getOperand(OpOffset + 4).getImm();
-
- uint32_t SrcSelect[4] = {0, 1, 2, 3};
- uint32_t Offsets[3] = {0, 0, 0};
- uint64_t CoordType[4] = {1, 1, 1, 1};
-
- if (HasOffsets)
- for (unsigned i = 0; i < 3; i++) {
- int SignedOffset = MI.getOperand(i + 2).getImm();
- Offsets[i] = (SignedOffset & 0x1F);
- }
-
- if (TextureType == TEXTURE_RECT ||
- TextureType == TEXTURE_SHADOWRECT) {
- CoordType[ELEMENT_X] = 0;
- CoordType[ELEMENT_Y] = 0;
- }
-
- if (TextureType == TEXTURE_1D_ARRAY ||
- TextureType == TEXTURE_SHADOW1D_ARRAY) {
- if (Opcode == AMDGPU::TEX_SAMPLE_C_L ||
- Opcode == AMDGPU::TEX_SAMPLE_C_LB) {
- CoordType[ELEMENT_Y] = 0;
- } else {
- CoordType[ELEMENT_Z] = 0;
- SrcSelect[ELEMENT_Z] = ELEMENT_Y;
- }
- } else if (TextureType == TEXTURE_2D_ARRAY ||
- TextureType == TEXTURE_SHADOW2D_ARRAY) {
- CoordType[ELEMENT_Z] = 0;
- }
-
-
- if ((TextureType == TEXTURE_SHADOW1D ||
- TextureType == TEXTURE_SHADOW2D ||
- TextureType == TEXTURE_SHADOWRECT ||
- TextureType == TEXTURE_SHADOW1D_ARRAY) &&
- Opcode != AMDGPU::TEX_SAMPLE_C_L &&
- Opcode != AMDGPU::TEX_SAMPLE_C_LB) {
- SrcSelect[ELEMENT_W] = ELEMENT_Z;
- }
-
- uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups) |
- CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 |
- CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63;
- uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
- SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
- SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
- Offsets[2] << 10;
-
- Emit(Word01, OS);
- Emit(Word2, OS);
- Emit((u_int32_t) 0, OS);
+ int64_t Sampler = MI.getOperand(14).getImm();
+
+ int64_t SrcSelect[4] = {
+ MI.getOperand(2).getImm(),
+ MI.getOperand(3).getImm(),
+ MI.getOperand(4).getImm(),
+ MI.getOperand(5).getImm()
+ };
+ int64_t Offsets[3] = {
+ MI.getOperand(6).getImm() & 0x1F,
+ MI.getOperand(7).getImm() & 0x1F,
+ MI.getOperand(8).getImm() & 0x1F
+ };
+
+ uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups);
+ uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
+ SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
+ SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
+ Offsets[2] << 10;
+
+ Emit(Word01, OS);
+ Emit(Word2, OS);
+ Emit((uint32_t) 0, OS);
} else {
uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) &&
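
For reference, the TEX Word2 layout assembled above places the three 5-bit offsets at bits 0, 5 and 10, the sampler id at bit 15, and the four 3-bit source selects from bit 20 upward. A sketch of the packing under those field positions:

#include <cstdint>
#include <cstdio>

static uint32_t packTexWord2(uint32_t Sampler, const uint32_t Sel[4],
                             const uint32_t Off[3]) {
  return (Off[0] & 0x1F) | (Off[1] & 0x1F) << 5 | (Off[2] & 0x1F) << 10 |
         Sampler << 15 | Sel[0] << 20 | Sel[1] << 23 | Sel[2] << 26 |
         Sel[3] << 29;
}

int main() {
  const uint32_t Sel[4] = {0, 1, 2, 3};    // identity X/Y/Z/W swizzle
  const uint32_t Off[3] = {0, 0, 0};
  std::printf("0x%08x\n", packTexWord2(1, Sel, Off)); // 0x68808000
}
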
diff --git a/lib/Target/R600/Processors.td b/lib/Target/R600/Processors.td
index 0cbe919..ee190e4 100644
--- a/lib/Target/R600/Processors.td
+++ b/lib/Target/R600/Processors.td
@@ -10,39 +10,45 @@
class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features>
: Processor<Name, itin, Features>;
def : Proc<"", R600_VLIW5_Itin,
- [FeatureR600ALUInst, FeatureVertexCache]>;
+ [FeatureR600, FeatureVertexCache]>;
def : Proc<"r600", R600_VLIW5_Itin,
- [FeatureR600ALUInst , FeatureVertexCache]>;
+ [FeatureR600 , FeatureVertexCache]>;
def : Proc<"rs880", R600_VLIW5_Itin,
- [FeatureR600ALUInst]>;
+ [FeatureR600]>;
def : Proc<"rv670", R600_VLIW5_Itin,
- [FeatureR600ALUInst, FeatureFP64, FeatureVertexCache]>;
+ [FeatureR600, FeatureFP64, FeatureVertexCache]>;
def : Proc<"rv710", R600_VLIW5_Itin,
- [FeatureVertexCache]>;
+ [FeatureR700, FeatureVertexCache]>;
def : Proc<"rv730", R600_VLIW5_Itin,
- [FeatureVertexCache]>;
+ [FeatureR700, FeatureVertexCache]>;
def : Proc<"rv770", R600_VLIW5_Itin,
- [FeatureFP64, FeatureVertexCache]>;
+ [FeatureR700, FeatureFP64, FeatureVertexCache]>;
def : Proc<"cedar", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache]>;
def : Proc<"redwood", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache]>;
def : Proc<"sumo", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages]>;
+ [FeatureEvergreen]>;
def : Proc<"juniper", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureVertexCache]>;
def : Proc<"cypress", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureFP64, FeatureVertexCache]>;
+ [FeatureEvergreen, FeatureFP64, FeatureVertexCache]>;
def : Proc<"barts", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureNorthernIslands, FeatureVertexCache]>;
def : Proc<"turks", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages, FeatureVertexCache]>;
+ [FeatureNorthernIslands, FeatureVertexCache]>;
def : Proc<"caicos", R600_VLIW5_Itin,
- [FeatureByteAddress, FeatureImages]>;
+ [FeatureNorthernIslands]>;
def : Proc<"cayman", R600_VLIW4_Itin,
- [FeatureByteAddress, FeatureImages, FeatureFP64]>;def : Proc<"SI", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"tahiti", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"pitcairn", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"verde", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"oland", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
-def : Proc<"hainan", SI_Itin, [Feature64BitPtr, FeatureFP64]>;
+ [FeatureNorthernIslands, FeatureFP64, FeatureCaymanISA]>;
+
+def : Proc<"SI", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"tahiti", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"pitcairn", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"verde", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"oland", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"hainan", SI_Itin, [FeatureSouthernIslands]>;
+def : Proc<"bonaire", SI_Itin, [FeatureSeaIslands]>;
+def : Proc<"kabini", SI_Itin, [FeatureSeaIslands]>;
+def : Proc<"kaveri", SI_Itin, [FeatureSeaIslands]>;
+def : Proc<"hawaii", SI_Itin, [FeatureSeaIslands]>;
diff --git a/lib/Target/R600/R600ClauseMergePass.cpp b/lib/Target/R600/R600ClauseMergePass.cpp
new file mode 100644
index 0000000..33d2ca3
--- /dev/null
+++ b/lib/Target/R600/R600ClauseMergePass.cpp
@@ -0,0 +1,204 @@
+//===-- R600ClauseMergePass - Merge consecutive CF_ALU -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// The R600EmitClauseMarkers pass emits CFAlu instructions in a conservative
+/// manner. This pass merges consecutive CFAlus where applicable.
+/// It needs to be called after IfCvt for best results.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "r600mergeclause"
+#include "AMDGPU.h"
+#include "R600Defines.h"
+#include "R600InstrInfo.h"
+#include "R600MachineFunctionInfo.h"
+#include "R600RegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+
+static bool isCFAlu(const MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case AMDGPU::CF_ALU:
+ case AMDGPU::CF_ALU_PUSH_BEFORE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+class R600ClauseMergePass : public MachineFunctionPass {
+
+private:
+ static char ID;
+ const R600InstrInfo *TII;
+
+ unsigned getCFAluSize(const MachineInstr *MI) const;
+ bool isCFAluEnabled(const MachineInstr *MI) const;
+
+  /// The IfCvt pass can generate "disabled" ALU clause markers that need to
+  /// be removed, with their content folded into the previous ALU clause.
+  /// This function parses the instructions following a CFAlu until it finds
+  /// either a disabled CFAlu (whose content it merges) or an enabled CFAlu.
+ void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
+
+  /// Check whether LatrCFAlu can be merged into RootCFAlu and do so if
+  /// it is possible.
+ bool mergeIfPossible(MachineInstr *RootCFAlu, const MachineInstr *LatrCFAlu)
+ const;
+
+public:
+ R600ClauseMergePass(TargetMachine &tm) : MachineFunctionPass(ID) { }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const;
+};
+
+char R600ClauseMergePass::ID = 0;
+
+unsigned R600ClauseMergePass::getCFAluSize(const MachineInstr *MI) const {
+ assert(isCFAlu(MI));
+ return MI->getOperand(
+ TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::COUNT)).getImm();
+}
+
+bool R600ClauseMergePass::isCFAluEnabled(const MachineInstr *MI) const {
+ assert(isCFAlu(MI));
+ return MI->getOperand(
+ TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::Enabled)).getImm();
+}
+
+void R600ClauseMergePass::cleanPotentialDisabledCFAlu(MachineInstr *CFAlu)
+ const {
+ int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
+ MachineBasicBlock::iterator I = CFAlu, E = CFAlu->getParent()->end();
+ I++;
+ do {
+    while (I != E && !isCFAlu(I))
+ I++;
+ if (I == E)
+ return;
+ MachineInstr *MI = I++;
+ if (isCFAluEnabled(MI))
+ break;
+ CFAlu->getOperand(CntIdx).setImm(getCFAluSize(CFAlu) + getCFAluSize(MI));
+ MI->eraseFromParent();
+ } while (I != E);
+}
+
+bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
+ const MachineInstr *LatrCFAlu) const {
+ assert(isCFAlu(RootCFAlu) && isCFAlu(LatrCFAlu));
+ int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
+ unsigned RootInstCount = getCFAluSize(RootCFAlu),
+ LaterInstCount = getCFAluSize(LatrCFAlu);
+ unsigned CumuledInsts = RootInstCount + LaterInstCount;
+ if (CumuledInsts >= TII->getMaxAlusPerClause()) {
+ DEBUG(dbgs() << "Excess inst counts\n");
+ return false;
+ }
+ if (RootCFAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
+ return false;
+  // Is KCache Bank 0 compatible?
+ int Mode0Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE0);
+ int KBank0Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK0);
+ int KBank0LineIdx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR0);
+ if (LatrCFAlu->getOperand(Mode0Idx).getImm() &&
+ RootCFAlu->getOperand(Mode0Idx).getImm() &&
+ (LatrCFAlu->getOperand(KBank0Idx).getImm() !=
+ RootCFAlu->getOperand(KBank0Idx).getImm() ||
+ LatrCFAlu->getOperand(KBank0LineIdx).getImm() !=
+ RootCFAlu->getOperand(KBank0LineIdx).getImm())) {
+ DEBUG(dbgs() << "Wrong KC0\n");
+ return false;
+ }
+  // Is KCache Bank 1 compatible?
+ int Mode1Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE1);
+ int KBank1Idx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK1);
+ int KBank1LineIdx =
+ TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR1);
+ if (LatrCFAlu->getOperand(Mode1Idx).getImm() &&
+ RootCFAlu->getOperand(Mode1Idx).getImm() &&
+ (LatrCFAlu->getOperand(KBank1Idx).getImm() !=
+ RootCFAlu->getOperand(KBank1Idx).getImm() ||
+ LatrCFAlu->getOperand(KBank1LineIdx).getImm() !=
+ RootCFAlu->getOperand(KBank1LineIdx).getImm())) {
+    DEBUG(dbgs() << "Wrong KC1\n");
+ return false;
+ }
+ if (LatrCFAlu->getOperand(Mode0Idx).getImm()) {
+ RootCFAlu->getOperand(Mode0Idx).setImm(
+ LatrCFAlu->getOperand(Mode0Idx).getImm());
+ RootCFAlu->getOperand(KBank0Idx).setImm(
+ LatrCFAlu->getOperand(KBank0Idx).getImm());
+ RootCFAlu->getOperand(KBank0LineIdx).setImm(
+ LatrCFAlu->getOperand(KBank0LineIdx).getImm());
+ }
+ if (LatrCFAlu->getOperand(Mode1Idx).getImm()) {
+ RootCFAlu->getOperand(Mode1Idx).setImm(
+ LatrCFAlu->getOperand(Mode1Idx).getImm());
+ RootCFAlu->getOperand(KBank1Idx).setImm(
+ LatrCFAlu->getOperand(KBank1Idx).getImm());
+ RootCFAlu->getOperand(KBank1LineIdx).setImm(
+ LatrCFAlu->getOperand(KBank1LineIdx).getImm());
+ }
+ RootCFAlu->getOperand(CntIdx).setImm(CumuledInsts);
+ RootCFAlu->setDesc(TII->get(LatrCFAlu->getOpcode()));
+ return true;
+}
+
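
mergeIfPossible therefore boils down to two conditions: the combined instruction count must stay below the clause limit, and any KCache bank configured in both clauses must agree on bank and line address. A reduced model of the check (field names are illustrative):

#include <cstdio>

struct KBank { bool Used; unsigned Bank, Addr; };
struct Clause { unsigned Count; KBank KC[2]; };

static bool canMerge(const Clause &Root, const Clause &Later, unsigned Max) {
  if (Root.Count + Later.Count >= Max)
    return false;                          // merged clause would overflow
  for (int i = 0; i < 2; ++i)
    if (Root.KC[i].Used && Later.KC[i].Used &&
        (Root.KC[i].Bank != Later.KC[i].Bank ||
         Root.KC[i].Addr != Later.KC[i].Addr))
      return false;                        // incompatible constant banks
  return true;
}

int main() {
  Clause A = {60, {{true, 0, 4}, {false, 0, 0}}};
  Clause B = {50, {{true, 0, 4}, {false, 0, 0}}};
  std::printf("%d\n", canMerge(A, B, 128)); // 1: fits and KC0 matches
}
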
+bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+ for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
+ BB != BB_E; ++BB) {
+ MachineBasicBlock &MBB = *BB;
+ MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ MachineBasicBlock::iterator LatestCFAlu = E;
+ while (I != E) {
+ MachineInstr *MI = I++;
+ if ((!TII->canBeConsideredALU(MI) && !isCFAlu(MI)) ||
+ TII->mustBeLastInClause(MI->getOpcode()))
+ LatestCFAlu = E;
+ if (!isCFAlu(MI))
+ continue;
+ cleanPotentialDisabledCFAlu(MI);
+
+ if (LatestCFAlu != E && mergeIfPossible(LatestCFAlu, MI)) {
+ MI->eraseFromParent();
+ } else {
+ assert(MI->getOperand(8).getImm() && "CF ALU instruction disabled");
+ LatestCFAlu = MI;
+ }
+ }
+ }
+ return false;
+}
+
+const char *R600ClauseMergePass::getPassName() const {
+ return "R600 Merge Clause Markers Pass";
+}
+
+} // end anonymous namespace
+
+
+llvm::FunctionPass *llvm::createR600ClauseMergePass(TargetMachine &TM) {
+ return new R600ClauseMergePass(TM);
+}
diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/R600/R600ControlFlowFinalizer.cpp
index ffe3414..ac3d8f6 100644
--- a/lib/Target/R600/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/R600/R600ControlFlowFinalizer.cpp
@@ -14,8 +14,6 @@
#define DEBUG_TYPE "r600cf"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
@@ -24,8 +22,11 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
-namespace llvm {
+namespace {
class R600ControlFlowFinalizer : public MachineFunctionPass {
@@ -48,7 +49,7 @@ private:
static char ID;
const R600InstrInfo *TII;
- const R600RegisterInfo &TRI;
+ const R600RegisterInfo *TRI;
unsigned MaxFetchInst;
const AMDGPUSubtarget &ST;
@@ -64,7 +65,7 @@ private:
const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
unsigned Opcode = 0;
- bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX);
+ bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
switch (CFI) {
case CF_TC:
Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
@@ -97,7 +98,7 @@ private:
Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
break;
case CF_END:
- if (ST.device()->getDeviceFlag() == OCL_DEVICE_CAYMAN) {
+ if (ST.hasCaymanISA()) {
Opcode = AMDGPU::CF_END_CM;
break;
}
@@ -109,28 +110,33 @@ private:
}
bool isCompatibleWithClause(const MachineInstr *MI,
- std::set<unsigned> &DstRegs, std::set<unsigned> &SrcRegs) const {
+ std::set<unsigned> &DstRegs) const {
unsigned DstMI, SrcMI;
for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
E = MI->operands_end(); I != E; ++I) {
const MachineOperand &MO = *I;
if (!MO.isReg())
continue;
- if (MO.isDef())
- DstMI = MO.getReg();
+ if (MO.isDef()) {
+ unsigned Reg = MO.getReg();
+ if (AMDGPU::R600_Reg128RegClass.contains(Reg))
+ DstMI = Reg;
+ else
+ DstMI = TRI->getMatchingSuperReg(Reg,
+ TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
+ &AMDGPU::R600_Reg128RegClass);
+ }
if (MO.isUse()) {
unsigned Reg = MO.getReg();
if (AMDGPU::R600_Reg128RegClass.contains(Reg))
SrcMI = Reg;
else
- SrcMI = TRI.getMatchingSuperReg(Reg,
- TRI.getSubRegFromChannel(TRI.getHWRegChan(Reg)),
+ SrcMI = TRI->getMatchingSuperReg(Reg,
+ TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
&AMDGPU::R600_Reg128RegClass);
}
}
- if ((DstRegs.find(SrcMI) == DstRegs.end()) &&
- (SrcRegs.find(DstMI) == SrcRegs.end())) {
- SrcRegs.insert(SrcMI);
+ if ((DstRegs.find(SrcMI) == DstRegs.end())) {
DstRegs.insert(DstMI);
return true;
} else
@@ -144,16 +150,16 @@ private:
std::vector<MachineInstr *> ClauseContent;
unsigned AluInstCount = 0;
bool IsTex = TII->usesTextureCache(ClauseHead);
- std::set<unsigned> DstRegs, SrcRegs;
+ std::set<unsigned> DstRegs;
for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
if (IsTrivialInst(I))
continue;
- if (AluInstCount > MaxFetchInst)
+ if (AluInstCount >= MaxFetchInst)
break;
if ((IsTex && !TII->usesTextureCache(I)) ||
(!IsTex && !TII->usesVertexCache(I)))
break;
- if (!isCompatibleWithClause(I, DstRegs, SrcRegs))
+ if (!isCompatibleWithClause(I, DstRegs))
break;
AluInstCount ++;
ClauseContent.push_back(I);
@@ -166,28 +172,26 @@ private:
}
void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
- unsigned LiteralRegs[] = {
+ static const unsigned LiteralRegs[] = {
AMDGPU::ALU_LITERAL_X,
AMDGPU::ALU_LITERAL_Y,
AMDGPU::ALU_LITERAL_Z,
AMDGPU::ALU_LITERAL_W
};
- for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
- continue;
- if (MO.getReg() != AMDGPU::ALU_LITERAL_X)
+ const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs =
+ TII->getSrcs(MI);
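+    // Illustrative note (not part of the original patch): an instruction
+    // group can embed at most four literal dwords, read back through
+    // ALU_LITERAL_{X,Y,Z,W}. Identical immediates are deduplicated below,
+    // so e.g. sources with immediates {1.0, 2.0, 1.0} occupy only the X
+    // and Y literal slots.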
+ for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
+ if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
continue;
- unsigned ImmIdx = TII->getOperandIdx(MI->getOpcode(), R600Operands::IMM);
- int64_t Imm = MI->getOperand(ImmIdx).getImm();
+ int64_t Imm = Srcs[i].second;
std::vector<int64_t>::iterator It =
std::find(Lits.begin(), Lits.end(), Imm);
if (It != Lits.end()) {
unsigned Index = It - Lits.begin();
- MO.setReg(LiteralRegs[Index]);
+ Srcs[i].first->setReg(LiteralRegs[Index]);
} else {
assert(Lits.size() < 4 && "Too many literals in Instruction Group");
- MO.setReg(LiteralRegs[Lits.size()]);
+ Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
Lits.push_back(Imm);
}
}
@@ -252,6 +256,7 @@ private:
ClauseContent.push_back(MILit);
}
}
+ assert(ClauseContent.size() < 128 && "ALU clause is too big");
ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
return ClauseFile(ClauseHead, ClauseContent);
}
@@ -272,6 +277,7 @@ private:
void
EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
unsigned &CfCount) {
+ Clause.first->getOperand(0).setImm(0);
CounterPropagateAddr(Clause.first, CfCount);
MachineBasicBlock *BB = Clause.first->getParent();
BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
@@ -295,34 +301,35 @@ private:
}
unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
- switch (ST.device()->getGeneration()) {
- case AMDGPUDeviceInfo::HD4XXX:
+ switch (ST.getGeneration()) {
+ case AMDGPUSubtarget::R600:
+ case AMDGPUSubtarget::R700:
if (hasPush)
StackSubEntry += 2;
break;
- case AMDGPUDeviceInfo::HD5XXX:
+ case AMDGPUSubtarget::EVERGREEN:
if (hasPush)
StackSubEntry ++;
- case AMDGPUDeviceInfo::HD6XXX:
+ case AMDGPUSubtarget::NORTHERN_ISLANDS:
StackSubEntry += 2;
break;
+ default: llvm_unreachable("Not a VLIW4/VLIW5 GPU");
}
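+    // Illustrative note (not part of the original patch): the +3 rounds
+    // up, e.g. StackSubEntry == 5 sub-entries become (5 + 3) / 4 == 2
+    // hardware stack entries.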
return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
}
public:
R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
- TRI(TII->getRegisterInfo()),
+ TII (0), TRI(0),
ST(tm.getSubtarget<AMDGPUSubtarget>()) {
const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
- MaxFetchInst = 8;
- else
- MaxFetchInst = 16;
+ MaxFetchInst = ST.getTexVTXClauseSize();
}
virtual bool runOnMachineFunction(MachineFunction &MF) {
+    TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+    TRI = static_cast<const R600RegisterInfo *>(
+        MF.getTarget().getRegisterInfo());
+
unsigned MaxStack = 0;
unsigned CurrentStack = 0;
bool HasPush = false;
@@ -340,6 +347,9 @@ public:
MaxStack = 1;
}
std::vector<ClauseFile> FetchClauses, AluClauses;
+ std::vector<MachineInstr *> LastAlu(1);
+ std::vector<MachineInstr *> ToPopAfter;
+
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E;) {
if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
@@ -350,6 +360,10 @@ public:
}
MachineBasicBlock::iterator MI = I;
+ if (MI->getOpcode() != AMDGPU::ENDIF)
+ LastAlu.back() = 0;
+ if (MI->getOpcode() == AMDGPU::CF_ALU)
+ LastAlu.back() = MI;
I++;
switch (MI->getOpcode()) {
case AMDGPU::CF_ALU_PUSH_BEFORE:
@@ -359,12 +373,6 @@ public:
case AMDGPU::CF_ALU:
I = MI;
AluClauses.push_back(MakeALUClause(MBB, I));
- case AMDGPU::EG_ExportBuf:
- case AMDGPU::EG_ExportSwz:
- case AMDGPU::R600_ExportBuf:
- case AMDGPU::R600_ExportSwz:
- case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
- case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
DEBUG(dbgs() << CfCount << ":"; MI->dump(););
CfCount++;
break;
@@ -395,6 +403,7 @@ public:
break;
}
case AMDGPU::IF_PREDICATE_SET: {
+ LastAlu.push_back(0);
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_JUMP))
.addImm(0)
@@ -412,7 +421,7 @@ public:
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_ELSE))
.addImm(0)
- .addImm(1);
+ .addImm(0);
DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
IfThenElseStack.push_back(MIb);
MI->eraseFromParent();
@@ -421,31 +430,31 @@ public:
}
case AMDGPU::ENDIF: {
CurrentStack--;
+ if (LastAlu.back()) {
+ ToPopAfter.push_back(LastAlu.back());
+ } else {
+ MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
+ getHWInstrDesc(CF_POP))
+ .addImm(CfCount + 1)
+ .addImm(1);
+ (void)MIb;
+ DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ CfCount++;
+ }
+
MachineInstr *IfOrElseInst = IfThenElseStack.back();
IfThenElseStack.pop_back();
- CounterPropagateAddr(IfOrElseInst, CfCount + 1);
- MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
- getHWInstrDesc(CF_POP))
- .addImm(CfCount + 1)
- .addImm(1);
- (void)MIb;
- DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
+ CounterPropagateAddr(IfOrElseInst, CfCount);
+ IfOrElseInst->getOperand(1).setImm(1);
+ LastAlu.pop_back();
MI->eraseFromParent();
- CfCount++;
break;
}
- case AMDGPU::PREDICATED_BREAK: {
- CurrentStack--;
- CfCount += 3;
- BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
- .addImm(CfCount)
- .addImm(1);
+ case AMDGPU::BREAK: {
+ CfCount ++;
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_LOOP_BREAK))
.addImm(0);
- BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
- .addImm(CfCount)
- .addImm(1);
LoopStack.back().second.insert(MIb);
MI->eraseFromParent();
break;
@@ -473,9 +482,28 @@ public:
EmitALUClause(I, AluClauses[i], CfCount);
}
default:
+ if (TII->isExport(MI->getOpcode())) {
+ DEBUG(dbgs() << CfCount << ":"; MI->dump(););
+ CfCount++;
+ }
break;
}
}
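+      // Illustrative note (not part of the original patch): clauses
+      // recorded in ToPopAfter were the last ALU clause before an ENDIF,
+      // so instead of emitting a separate CF_POP the clause itself is
+      // rewritten to CF_ALU_POP_AFTER below, copying all nine CF_ALU
+      // operands (ADDR, kcache banks/modes/lines, COUNT, Enabled)
+      // unchanged.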
+ for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
+ MachineInstr *Alu = ToPopAfter[i];
+ BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
+ TII->get(AMDGPU::CF_ALU_POP_AFTER))
+ .addImm(Alu->getOperand(0).getImm())
+ .addImm(Alu->getOperand(1).getImm())
+ .addImm(Alu->getOperand(2).getImm())
+ .addImm(Alu->getOperand(3).getImm())
+ .addImm(Alu->getOperand(4).getImm())
+ .addImm(Alu->getOperand(5).getImm())
+ .addImm(Alu->getOperand(6).getImm())
+ .addImm(Alu->getOperand(7).getImm())
+ .addImm(Alu->getOperand(8).getImm());
+ Alu->eraseFromParent();
+ }
MFI->StackSize = getHWStackSize(MaxStack, HasPush);
}
@@ -489,7 +517,7 @@ public:
char R600ControlFlowFinalizer::ID = 0;
-}
+} // end anonymous namespace
llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
index 36bfb18..1781f2a 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/R600/R600Defines.h
@@ -41,7 +41,12 @@ namespace R600_InstFlag {
OP1 = (1 << 10),
OP2 = (1 << 11),
VTX_INST = (1 << 12),
- TEX_INST = (1 << 13)
+ TEX_INST = (1 << 13),
+ ALU_INST = (1 << 14),
+ LDS_1A = (1 << 15),
+ LDS_1A1D = (1 << 16),
+ IS_EXPORT = (1 << 17),
+ LDS_1A2D = (1 << 18)
};
}
@@ -57,47 +62,82 @@ namespace R600_InstFlag {
#define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
#define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)
-namespace R600Operands {
- enum Ops {
- DST,
- UPDATE_EXEC_MASK,
- UPDATE_PREDICATE,
- WRITE,
- OMOD,
- DST_REL,
- CLAMP,
- SRC0,
- SRC0_NEG,
- SRC0_REL,
- SRC0_ABS,
- SRC0_SEL,
- SRC1,
- SRC1_NEG,
- SRC1_REL,
- SRC1_ABS,
- SRC1_SEL,
- SRC2,
- SRC2_NEG,
- SRC2_REL,
- SRC2_SEL,
- LAST,
- PRED_SEL,
- IMM,
- BANK_SWIZZLE,
- COUNT
+namespace OpName {
+
+ enum VecOps {
+ UPDATE_EXEC_MASK_X,
+ UPDATE_PREDICATE_X,
+ WRITE_X,
+ OMOD_X,
+ DST_REL_X,
+ CLAMP_X,
+ SRC0_X,
+ SRC0_NEG_X,
+ SRC0_REL_X,
+ SRC0_ABS_X,
+ SRC0_SEL_X,
+ SRC1_X,
+ SRC1_NEG_X,
+ SRC1_REL_X,
+ SRC1_ABS_X,
+ SRC1_SEL_X,
+ PRED_SEL_X,
+ UPDATE_EXEC_MASK_Y,
+ UPDATE_PREDICATE_Y,
+ WRITE_Y,
+ OMOD_Y,
+ DST_REL_Y,
+ CLAMP_Y,
+ SRC0_Y,
+ SRC0_NEG_Y,
+ SRC0_REL_Y,
+ SRC0_ABS_Y,
+ SRC0_SEL_Y,
+ SRC1_Y,
+ SRC1_NEG_Y,
+ SRC1_REL_Y,
+ SRC1_ABS_Y,
+ SRC1_SEL_Y,
+ PRED_SEL_Y,
+ UPDATE_EXEC_MASK_Z,
+ UPDATE_PREDICATE_Z,
+ WRITE_Z,
+ OMOD_Z,
+ DST_REL_Z,
+ CLAMP_Z,
+ SRC0_Z,
+ SRC0_NEG_Z,
+ SRC0_REL_Z,
+ SRC0_ABS_Z,
+ SRC0_SEL_Z,
+ SRC1_Z,
+ SRC1_NEG_Z,
+ SRC1_REL_Z,
+ SRC1_ABS_Z,
+ SRC1_SEL_Z,
+ PRED_SEL_Z,
+ UPDATE_EXEC_MASK_W,
+ UPDATE_PREDICATE_W,
+ WRITE_W,
+ OMOD_W,
+ DST_REL_W,
+ CLAMP_W,
+ SRC0_W,
+ SRC0_NEG_W,
+ SRC0_REL_W,
+ SRC0_ABS_W,
+ SRC0_SEL_W,
+ SRC1_W,
+ SRC1_NEG_W,
+ SRC1_REL_W,
+ SRC1_ABS_W,
+ SRC1_SEL_W,
+ PRED_SEL_W,
+ IMM_0,
+ IMM_1,
+ VEC_COUNT
};
- const static int ALUOpTable[3][R600Operands::COUNT] = {
-// W C S S S S S S S S S S S
-// R O D L S R R R R S R R R R S R R R L P
-// D U I M R A R C C C C R C C C C R C C C A R I
-// S E U T O E M C 0 0 0 0 C 1 1 1 1 C 2 2 2 S E M B
-// T M P E D L P 0 N R A S 1 N R A S 2 N R S T D M S
- {0,-1,-1, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,11,12,13},
- {0, 1, 2, 3, 4 ,5 ,6 ,7, 8, 9,10,11,12,13,14,15,16,-1,-1,-1,-1,17,18,19,20},
- {0,-1,-1,-1,-1, 1, 2, 3, 4, 5,-1, 6, 7, 8, 9,-1,10,11,12,13,14,15,16,17,18}
- };
-
}
//===----------------------------------------------------------------------===//
@@ -126,4 +166,6 @@ namespace R600Operands {
#define R_028878_SQ_PGM_RESOURCES_GS 0x028878
#define R_0288D4_SQ_PGM_RESOURCES_LS 0x0288d4
+#define R_0288E8_SQ_LDS_ALLOC 0x0288E8
+
#endif // R600DEFINES_H_
diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp
index 3fdc678..1bbfd2b 100644
--- a/lib/Target/R600/R600EmitClauseMarkers.cpp
+++ b/lib/Target/R600/R600EmitClauseMarkers.cpp
@@ -23,21 +23,23 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-namespace llvm {
+using namespace llvm;
+
+namespace {
class R600EmitClauseMarkersPass : public MachineFunctionPass {
private:
static char ID;
const R600InstrInfo *TII;
+ int Address;
unsigned OccupiedDwords(MachineInstr *MI) const {
switch (MI->getOpcode()) {
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return 4;
case AMDGPU::KILL:
return 0;
@@ -45,6 +47,11 @@ private:
break;
}
+ // These will be expanded to two ALU instructions in the
+      // R600ExpandSpecialInstrs pass.
+ if (TII->isLDSRetInstr(MI->getOpcode()))
+ return 2;
+
if(TII->isVector(*MI) ||
TII->isCubeOp(MI->getOpcode()) ||
TII->isReductionOp(MI->getOpcode()))
@@ -71,8 +78,7 @@ private:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
case AMDGPU::COPY:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return true;
default:
return false;
@@ -83,37 +89,13 @@ private:
switch (MI->getOpcode()) {
case AMDGPU::KILL:
case AMDGPU::RETURN:
+ case AMDGPU::IMPLICIT_DEF:
return true;
default:
return false;
}
}
- // Register Idx, then Const value
- std::vector<std::pair<unsigned, unsigned> > ExtractConstRead(MachineInstr *MI)
- const {
- const R600Operands::Ops OpTable[3][2] = {
- {R600Operands::SRC0, R600Operands::SRC0_SEL},
- {R600Operands::SRC1, R600Operands::SRC1_SEL},
- {R600Operands::SRC2, R600Operands::SRC2_SEL},
- };
- std::vector<std::pair<unsigned, unsigned> > Result;
-
- if (!TII->isALUInstr(MI->getOpcode()))
- return Result;
- for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = TII->getOperandIdx(MI->getOpcode(), OpTable[j][0]);
- if (SrcIdx < 0)
- break;
- if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) {
- unsigned Const = MI->getOperand(
- TII->getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
- Result.push_back(std::pair<unsigned, unsigned>(SrcIdx, Const));
- }
- }
- return Result;
- }
-
std::pair<unsigned, unsigned> getAccessedBankLine(unsigned Sel) const {
// Sel is (512 + (kc_bank << 12) + ConstIndex) << 2
// (See also R600ISelLowering.cpp)
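+    // Worked example (illustrative, not part of the original patch): for
+    // kc_bank == 1 and ConstIndex == 20 this encoding gives
+    //   Sel = (512 + (1 << 12) + 20) << 2 == 18512,
+    // from which this helper recovers the (bank, line) pair touched by
+    // the constant read.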
@@ -129,11 +111,20 @@ private:
}
bool SubstituteKCacheBank(MachineInstr *MI,
- std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const {
+ std::vector<std::pair<unsigned, unsigned> > &CachedConsts,
+ bool UpdateInstr = true) const {
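+    // Illustrative note (not part of the original patch): when UpdateInstr
+    // is false this runs as a dry pass -- it only checks whether the
+    // constant reads of MI would still fit in the clause's two kcache
+    // banks, without rewriting any operands.
+    // canClauseLocalKillFitInClause() below relies on that to look ahead
+    // in the clause safely.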
std::vector<std::pair<unsigned, unsigned> > UsedKCache;
- std::vector<std::pair<unsigned, unsigned> > Consts = ExtractConstRead(MI);
- assert(TII->isALUInstr(MI->getOpcode()) && "Can't assign Const");
+
+ if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
+ return true;
+
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
+ TII->getSrcs(MI);
+ assert((TII->isALUInstr(MI->getOpcode()) ||
+ MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
+ if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
+ continue;
unsigned Sel = Consts[i].second;
unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31;
unsigned KCacheIndex = Index * 4 + Chan;
@@ -159,25 +150,77 @@ private:
return false;
}
- for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
- switch(UsedKCache[i].first) {
+ if (!UpdateInstr)
+ return true;
+
+ for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) {
+ if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
+ continue;
+ switch(UsedKCache[j].first) {
case 0:
- MI->getOperand(Consts[i].first).setReg(
- AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[i].second));
+ Consts[i].first->setReg(
+ AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[j].second));
break;
case 1:
- MI->getOperand(Consts[i].first).setReg(
- AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[i].second));
+ Consts[i].first->setReg(
+ AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[j].second));
break;
default:
llvm_unreachable("Wrong Cache Line");
}
+ j++;
+ }
+ return true;
+ }
+
+ bool canClauseLocalKillFitInClause(
+ unsigned AluInstCount,
+ std::vector<std::pair<unsigned, unsigned> > KCacheBanks,
+ MachineBasicBlock::iterator Def,
+ MachineBasicBlock::iterator BBEnd) {
+ const R600RegisterInfo &TRI = TII->getRegisterInfo();
+ for (MachineInstr::const_mop_iterator
+ MOI = Def->operands_begin(),
+ MOE = Def->operands_end(); MOI != MOE; ++MOI) {
+ if (!MOI->isReg() || !MOI->isDef() ||
+ TRI.isPhysRegLiveAcrossClauses(MOI->getReg()))
+ continue;
+
+ // Def defines a clause local register, so check that its use will fit
+ // in the clause.
+ unsigned LastUseCount = 0;
+ for (MachineBasicBlock::iterator UseI = Def; UseI != BBEnd; ++UseI) {
+ AluInstCount += OccupiedDwords(UseI);
+ // Make sure we won't need to end the clause due to KCache limitations.
+ if (!SubstituteKCacheBank(UseI, KCacheBanks, false))
+ return false;
+
+ // We have reached the maximum instruction limit before finding the
+ // use that kills this register, so we cannot use this def in the
+ // current clause.
+ if (AluInstCount >= TII->getMaxAlusPerClause())
+ return false;
+
+ // Register kill flags have been cleared by the time we get to this
+ // pass, but it is safe to assume that all uses of this register
+ // occur in the same basic block as its definition, because
+ // it is illegal for the scheduler to schedule them in
+ // different blocks.
+ if (UseI->findRegisterUseOperandIdx(MOI->getReg()))
+ LastUseCount = AluInstCount;
+
+ if (UseI != Def && UseI->findRegisterDefOperandIdx(MOI->getReg()) != -1)
+ break;
+ }
+ if (LastUseCount)
+ return LastUseCount <= TII->getMaxAlusPerClause();
+ llvm_unreachable("Clause local register live at end of clause.");
}
return true;
}
MachineBasicBlock::iterator
- MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
+ MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) {
MachineBasicBlock::iterator ClauseHead = I;
std::vector<std::pair<unsigned, unsigned> > KCacheBanks;
bool PushBeforeModifier = false;
@@ -190,39 +233,66 @@ private:
if (AluInstCount > TII->getMaxAlusPerClause())
break;
if (I->getOpcode() == AMDGPU::PRED_X) {
+ // We put PRED_X in its own clause to ensure that ifcvt won't create
+ // clauses with more than 128 insts.
+      // IfCvt checks that the "then" and "else" branches of an if
+      // statement have fewer than ~60 insts each, so converted clauses
+      // can't be bigger than ~121 insts (the predicate setter needs to be
+      // in the same clause as the predicated ALUs).
+ if (AluInstCount > 0)
+ break;
if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH)
PushBeforeModifier = true;
AluInstCount ++;
continue;
}
- if (I->getOpcode() == AMDGPU::KILLGT) {
+ // XXX: GROUP_BARRIER instructions cannot be in the same ALU clause as:
+ //
+ // * KILL or INTERP instructions
+ // * Any instruction that sets UPDATE_EXEC_MASK or UPDATE_PRED bits
+ // * Uses waterfalling (i.e. INDEX_MODE = AR.X)
+ //
+ // XXX: These checks have not been implemented yet.
+ if (TII->mustBeLastInClause(I->getOpcode())) {
I++;
break;
}
- if (TII->isALUInstr(I->getOpcode()) &&
- !SubstituteKCacheBank(I, KCacheBanks))
+
+ // If this instruction defines a clause local register, make sure
+ // its use can fit in this clause.
+ if (!canClauseLocalKillFitInClause(AluInstCount, KCacheBanks, I, E))
+ break;
+
+ if (!SubstituteKCacheBank(I, KCacheBanks))
break;
AluInstCount += OccupiedDwords(I);
}
unsigned Opcode = PushBeforeModifier ?
AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU;
BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode))
- .addImm(0) // ADDR
+      // We don't use the ADDR field until the R600ControlFlowFinalizer
+      // pass, where it is safe to assume it is 0. However, if we always put
+      // 0 here, the ifcvt pass may assume that identical ALU clause starters
+      // at the beginning of the true and false branches can be factorized,
+      // which is not the case.
+ .addImm(Address++) // ADDR
.addImm(KCacheBanks.empty()?0:KCacheBanks[0].first) // KB0
.addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].first) // KB1
.addImm(KCacheBanks.empty()?0:2) // KM0
.addImm((KCacheBanks.size() < 2)?0:2) // KM1
.addImm(KCacheBanks.empty()?0:KCacheBanks[0].second) // KLINE0
.addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].second) // KLINE1
- .addImm(AluInstCount); // COUNT
+ .addImm(AluInstCount) // COUNT
+ .addImm(1); // Enabled
return I;
}
public:
R600EmitClauseMarkersPass(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { }
+ TII(0), Address(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
+
for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
BB != BB_E; ++BB) {
MachineBasicBlock &MBB = *BB;
@@ -246,7 +316,7 @@ public:
char R600EmitClauseMarkersPass::ID = 0;
-}
+} // end anonymous namespace
llvm::FunctionPass *llvm::createR600EmitClauseMarkers(TargetMachine &TM) {
diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
index f8c900f..aeee4aa 100644
--- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/R600/R600ExpandSpecialInstrs.cpp
@@ -38,7 +38,7 @@ private:
public:
R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
- TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { }
+ TII(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -56,6 +56,7 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
}
bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
const R600RegisterInfo &TRI = TII->getRegisterInfo();
@@ -67,6 +68,23 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MachineInstr &MI = *I;
I = llvm::next(I);
+ // Expand LDS_*_RET instructions
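+      // (Illustrative note, not part of the original patch: LDS reads on
+      // R600 return their result through the OQAP queue register, so the
+      // *_RET form is split into the LDS op writing OQAP plus a MOV that
+      // drains OQAP into the original destination, keeping the pred_sel of
+      // the two instructions in sync.)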
+ if (TII->isLDSRetInstr(MI.getOpcode())) {
+ int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
+ assert(DstIdx != -1);
+ MachineOperand &DstOp = MI.getOperand(DstIdx);
+ MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
+ DstOp.getReg(), AMDGPU::OQAP);
+ DstOp.setReg(AMDGPU::OQAP);
+ int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
+ AMDGPU::OpName::pred_sel);
+ int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
+ AMDGPU::OpName::pred_sel);
+ // Copy the pred_sel bit
+ Mov->getOperand(MovPredSelIdx).setReg(
+ MI.getOperand(LDSPredSelIdx).getReg());
+ }
+
switch (MI.getOpcode()) {
default: break;
// Expand PRED_X to one of the PRED_SET instructions.
@@ -81,28 +99,13 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
AMDGPU::ZERO); // src1
TII->addFlag(PredSet, 0, MO_FLAG_MASK);
if (Flags & MO_FLAG_PUSH) {
- TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
+ TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
} else {
- TII->setImmOperand(PredSet, R600Operands::UPDATE_PREDICATE, 1);
+ TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
}
MI.eraseFromParent();
continue;
}
- case AMDGPU::BREAK: {
- MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
- AMDGPU::PRED_SETE_INT,
- AMDGPU::PREDICATE_BIT,
- AMDGPU::ZERO,
- AMDGPU::ZERO);
- TII->addFlag(PredSet, 0, MO_FLAG_MASK);
- TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
-
- BuildMI(MBB, I, MBB.findDebugLoc(I),
- TII->get(AMDGPU::PREDICATED_BREAK))
- .addReg(AMDGPU::PREDICATE_BIT);
- MI.eraseFromParent();
- continue;
- }
case AMDGPU::INTERP_PAIR_XY: {
MachineInstr *BMI;
@@ -182,6 +185,45 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MI.eraseFromParent();
continue;
}
+ case AMDGPU::DOT_4: {
+
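+      // Illustrative note (not part of the original patch): DOT_4 is
+      // expanded into four bundled per-channel dot-product slots, one per
+      // x/y/z/w; every slot computes the same dot product, and all but the
+      // channel matching the destination register are write-masked.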
+ const R600RegisterInfo &TRI = TII->getRegisterInfo();
+
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
+
+ for (unsigned Chan = 0; Chan < 4; ++Chan) {
+ bool Mask = (Chan != TRI.getHWRegChan(DstReg));
+ unsigned SubDstReg =
+ AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
+ MachineInstr *BMI =
+ TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
+ if (Chan > 0) {
+ BMI->bundleWithPred();
+ }
+ if (Mask) {
+ TII->addFlag(BMI, 0, MO_FLAG_MASK);
+ }
+ if (Chan != 3)
+ TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
+ unsigned Opcode = BMI->getOpcode();
+        // While not strictly necessary from a hardware point of view, we
+        // force all src operands of a dot4 inst to belong to the same slot.
+ unsigned Src0 = BMI->getOperand(
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
+ .getReg();
+ unsigned Src1 = BMI->getOperand(
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
+ .getReg();
+ (void) Src0;
+ (void) Src1;
+ if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
+ (TRI.getEncodingValue(Src1) & 0xff) < 127)
+ assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
+ }
+ MI.eraseFromParent();
+ continue;
+ }
}
bool IsReduction = TII->isReductionOp(MI.getOpcode());
@@ -218,14 +260,14 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
// T0_W = CUBE T1_Y, T1_Z
for (unsigned Chan = 0; Chan < 4; Chan++) {
unsigned DstReg = MI.getOperand(
- TII->getOperandIdx(MI, R600Operands::DST)).getReg();
+ TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
unsigned Src0 = MI.getOperand(
- TII->getOperandIdx(MI, R600Operands::SRC0)).getReg();
+ TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
unsigned Src1 = 0;
// Determine the correct source registers
if (!IsCube) {
- int Src1Idx = TII->getOperandIdx(MI, R600Operands::SRC1);
+ int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
if (Src1Idx != -1) {
Src1 = MI.getOperand(Src1Idx).getReg();
}
@@ -268,12 +310,6 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::CUBE_eg_pseudo:
Opcode = AMDGPU::CUBE_eg_real;
break;
- case AMDGPU::DOT4_r600_pseudo:
- Opcode = AMDGPU::DOT4_r600_real;
- break;
- case AMDGPU::DOT4_eg_pseudo:
- Opcode = AMDGPU::DOT4_eg_real;
- break;
default:
break;
}
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 7252235..0fcb488 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -16,6 +16,7 @@
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -27,41 +28,40 @@ using namespace llvm;
R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
AMDGPUTargetLowering(TM),
- TII(static_cast<const R600InstrInfo*>(TM.getInstrInfo())) {
+ Gen(TM.getSubtarget<AMDGPUSubtarget>().getGeneration()) {
addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
+ addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
+ addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
+
computeRegisterProperties();
- setOperationAction(ISD::FADD, MVT::v4f32, Expand);
- setOperationAction(ISD::FMUL, MVT::v4f32, Expand);
- setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
- setOperationAction(ISD::FSUB, MVT::v4f32, Expand);
-
- setOperationAction(ISD::ADD, MVT::v4i32, Expand);
- setOperationAction(ISD::AND, MVT::v4i32, Expand);
- setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Expand);
- setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Expand);
- setOperationAction(ISD::MUL, MVT::v2i32, Expand);
- setOperationAction(ISD::MUL, MVT::v4i32, Expand);
- setOperationAction(ISD::OR, MVT::v4i32, Expand);
- setOperationAction(ISD::OR, MVT::v2i32, Expand);
- setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Expand);
- setOperationAction(ISD::SHL, MVT::v4i32, Expand);
- setOperationAction(ISD::SHL, MVT::v2i32, Expand);
- setOperationAction(ISD::SRL, MVT::v4i32, Expand);
- setOperationAction(ISD::SRL, MVT::v2i32, Expand);
- setOperationAction(ISD::SRA, MVT::v4i32, Expand);
- setOperationAction(ISD::SRA, MVT::v2i32, Expand);
- setOperationAction(ISD::SUB, MVT::v4i32, Expand);
- setOperationAction(ISD::SUB, MVT::v2i32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Expand);
- setOperationAction(ISD::UDIV, MVT::v4i32, Expand);
- setOperationAction(ISD::UREM, MVT::v4i32, Expand);
+ // Set condition code actions
+ setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETLT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETLE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::f32, Expand);
+
+ setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
+ setCondCodeAction(ISD::SETLT, MVT::i32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::i32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::i32, Expand);
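+  // Illustrative note (not part of the original patch): R600 compare ops
+  // natively provide the equal / not-equal / greater-than / greater-or-
+  // equal family, so the "less than" and most unordered float variants
+  // above are marked Expand and get legalized by swapping operands or
+  // inverting the condition (see LowerSELECT_CC below).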
+
+ setOperationAction(ISD::FCOS, MVT::f32, Custom);
+ setOperationAction(ISD::FSIN, MVT::f32, Custom);
+
setOperationAction(ISD::SETCC, MVT::v4i32, Expand);
- setOperationAction(ISD::XOR, MVT::v4i32, Expand);
- setOperationAction(ISD::XOR, MVT::v2i32, Expand);
+ setOperationAction(ISD::SETCC, MVT::v2i32, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
@@ -72,8 +72,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);
- setOperationAction(ISD::ROTL, MVT::i32, Custom);
-
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
@@ -81,24 +79,33 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::SETCC, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);
- setOperationAction(ISD::SELECT, MVT::i32, Custom);
- setOperationAction(ISD::SELECT, MVT::f32, Custom);
-
- setOperationAction(ISD::VSELECT, MVT::v4i32, Expand);
- setOperationAction(ISD::VSELECT, MVT::v2i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v2f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
// Legalize loads and stores to the private address space.
setOperationAction(ISD::LOAD, MVT::i32, Custom);
setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+
+ // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
+ // spaces, so it is custom lowered to handle those where it isn't.
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i16, Custom);
+
setOperationAction(ISD::STORE, MVT::i8, Custom);
setOperationAction(ISD::STORE, MVT::i32, Custom);
setOperationAction(ISD::STORE, MVT::v2i32, Custom);
setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i8, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i16, Custom);
setOperationAction(ISD::LOAD, MVT::i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
@@ -108,10 +115,13 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setTargetDAGCombine(ISD::FP_TO_SINT);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
- setSchedulingPreference(Sched::VLIW);
+ setSchedulingPreference(Sched::Source);
}
MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
@@ -119,9 +129,29 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
MachineFunction * MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineBasicBlock::iterator I = *MI;
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo*>(MF->getTarget().getInstrInfo());
switch (MI->getOpcode()) {
- default: return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+ default:
+    // Replace LDS_*_RET instructions that don't have any uses with the
+ // equivalent LDS_*_NORET instruction.
+ if (TII->isLDSRetInstr(MI->getOpcode())) {
+ int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ assert(DstIdx != -1);
+ MachineInstrBuilder NewMI;
+ if (!MRI.use_empty(MI->getOperand(DstIdx).getReg()))
+ return BB;
+
+ NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
+ TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
+ for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
+ NewMI.addOperand(MI->getOperand(i));
+ }
+ } else {
+ return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+ }
+ break;
case AMDGPU::CLAMP_R600: {
MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
AMDGPU::MOV,
@@ -169,12 +199,13 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::CONST_COPY: {
MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
- TII->setImmOperand(NewMI, R600Operands::SRC0_SEL,
+ TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
MI->getOperand(1).getImm());
break;
}
case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
+ case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
unsigned EOP = (llvm::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
@@ -188,23 +219,99 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::TXD: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-
+ MachineOperand &RID = MI->getOperand(4);
+ MachineOperand &SID = MI->getOperand(5);
+ unsigned TextureId = MI->getOperand(6).getImm();
+ unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
+ unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
+
+ switch (TextureId) {
+ case 5: // Rect
+ CTX = CTY = 0;
+ break;
+ case 6: // Shadow1D
+ SrcW = SrcZ;
+ break;
+ case 7: // Shadow2D
+ SrcW = SrcZ;
+ break;
+ case 8: // ShadowRect
+ CTX = CTY = 0;
+ SrcW = SrcZ;
+ break;
+ case 9: // 1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 10: // 2DArray
+ CTZ = 0;
+ break;
+ case 11: // Shadow1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 12: // Shadow2DArray
+ CTZ = 0;
+ break;
+ }
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
.addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
.addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
.addReg(T0, RegState::Implicit)
.addReg(T1, RegState::Implicit);
break;
@@ -213,23 +320,100 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::TXD_SHADOW: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+ MachineOperand &RID = MI->getOperand(4);
+ MachineOperand &SID = MI->getOperand(5);
+ unsigned TextureId = MI->getOperand(6).getImm();
+ unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
+ unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
+
+ switch (TextureId) {
+ case 5: // Rect
+ CTX = CTY = 0;
+ break;
+ case 6: // Shadow1D
+ SrcW = SrcZ;
+ break;
+ case 7: // Shadow2D
+ SrcW = SrcZ;
+ break;
+ case 8: // ShadowRect
+ CTX = CTY = 0;
+ SrcW = SrcZ;
+ break;
+ case 9: // 1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 10: // 2DArray
+ CTZ = 0;
+ break;
+ case 11: // Shadow1DArray
+ SrcZ = SrcY;
+ CTZ = 0;
+ break;
+ case 12: // Shadow2DArray
+ CTZ = 0;
+ break;
+ }
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
.addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
.addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6));
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
.addReg(T0, RegState::Implicit)
.addReg(T1, RegState::Implicit);
break;
@@ -321,30 +505,27 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//
-using namespace llvm::Intrinsic;
-using namespace llvm::AMDGPUIntrinsic;
-
SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
- case ISD::ROTL: return LowerROTL(Op, DAG);
+ case ISD::FCOS:
+ case ISD::FSIN: return LowerTrig(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
case ISD::LOAD: return LowerLOAD(Op, DAG);
- case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
case ISD::INTRINSIC_VOID: {
SDValue Chain = Op.getOperand(0);
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (IntrinsicID) {
case AMDGPUIntrinsic::AMDGPU_store_output: {
- MachineFunction &MF = DAG.getMachineFunction();
- R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
MFI->LiveOuts.push_back(Reg);
- return DAG.getCopyToReg(Chain, Op.getDebugLoc(), Reg, Op.getOperand(2));
+ return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
}
case AMDGPUIntrinsic::R600_store_swizzle: {
const SDValue Args[8] = {
@@ -357,7 +538,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(2, MVT::i32), // SWZ_Z
DAG.getConstant(3, MVT::i32) // SWZ_W
};
- return DAG.getNode(AMDGPUISD::EXPORT, Op.getDebugLoc(), Op.getValueType(),
+ return DAG.getNode(AMDGPUISD::EXPORT, SDLoc(Op), Op.getValueType(),
Args, 8);
}
@@ -371,13 +552,17 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
switch(IntrinsicID) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
case AMDGPUIntrinsic::R600_load_input: {
int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
- return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass, Reg, VT);
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MRI.addLiveIn(Reg);
+ return DAG.getCopyFromReg(DAG.getEntryNode(),
+ SDLoc(DAG.getEntryNode()), Reg, VT);
}
case AMDGPUIntrinsic::R600_interp_input: {
@@ -385,66 +570,184 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
MachineSDNode *interp;
if (ijb < 0) {
+ const MachineFunction &MF = DAG.getMachineFunction();
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo*>(MF.getTarget().getInstrInfo());
interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
MVT::v4f32, DAG.getTargetConstant(slot / 4 , MVT::i32));
return DAG.getTargetExtractSubreg(
TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
DL, MVT::f32, SDValue(interp, 0));
}
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ unsigned RegisterI = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb);
+ unsigned RegisterJ = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1);
+ MRI.addLiveIn(RegisterI);
+ MRI.addLiveIn(RegisterJ);
+ SDValue RegisterINode = DAG.getCopyFromReg(DAG.getEntryNode(),
+ SDLoc(DAG.getEntryNode()), RegisterI, MVT::f32);
+ SDValue RegisterJNode = DAG.getCopyFromReg(DAG.getEntryNode(),
+ SDLoc(DAG.getEntryNode()), RegisterJ, MVT::f32);
if (slot % 4 < 2)
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32));
+ RegisterJNode, RegisterINode);
else
interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4 , MVT::i32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1), MVT::f32),
- CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
- AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb), MVT::f32));
-
+ RegisterJNode, RegisterINode);
return SDValue(interp, slot % 2);
}
+ case AMDGPUIntrinsic::R600_interp_xy:
+ case AMDGPUIntrinsic::R600_interp_zw: {
+ int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ MachineSDNode *interp;
+ SDValue RegisterINode = Op.getOperand(2);
+ SDValue RegisterJNode = Op.getOperand(3);
+
+ if (IntrinsicID == AMDGPUIntrinsic::R600_interp_xy)
+ interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+ RegisterJNode, RegisterINode);
+ else
+ interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
+ MVT::f32, MVT::f32, DAG.getTargetConstant(slot, MVT::i32),
+ RegisterJNode, RegisterINode);
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
+ SDValue(interp, 0), SDValue(interp, 1));
+ }
+ case AMDGPUIntrinsic::R600_tex:
+ case AMDGPUIntrinsic::R600_texc:
+ case AMDGPUIntrinsic::R600_txl:
+ case AMDGPUIntrinsic::R600_txlc:
+ case AMDGPUIntrinsic::R600_txb:
+ case AMDGPUIntrinsic::R600_txbc:
+ case AMDGPUIntrinsic::R600_txf:
+ case AMDGPUIntrinsic::R600_txq:
+ case AMDGPUIntrinsic::R600_ddx:
+ case AMDGPUIntrinsic::R600_ddy:
+ case AMDGPUIntrinsic::R600_ldptr: {
+ unsigned TextureOp;
+ switch (IntrinsicID) {
+ case AMDGPUIntrinsic::R600_tex:
+ TextureOp = 0;
+ break;
+ case AMDGPUIntrinsic::R600_texc:
+ TextureOp = 1;
+ break;
+ case AMDGPUIntrinsic::R600_txl:
+ TextureOp = 2;
+ break;
+ case AMDGPUIntrinsic::R600_txlc:
+ TextureOp = 3;
+ break;
+ case AMDGPUIntrinsic::R600_txb:
+ TextureOp = 4;
+ break;
+ case AMDGPUIntrinsic::R600_txbc:
+ TextureOp = 5;
+ break;
+ case AMDGPUIntrinsic::R600_txf:
+ TextureOp = 6;
+ break;
+ case AMDGPUIntrinsic::R600_txq:
+ TextureOp = 7;
+ break;
+ case AMDGPUIntrinsic::R600_ddx:
+ TextureOp = 8;
+ break;
+ case AMDGPUIntrinsic::R600_ddy:
+ TextureOp = 9;
+ break;
+ case AMDGPUIntrinsic::R600_ldptr:
+ TextureOp = 10;
+ break;
+ default:
+        llvm_unreachable("Unknown Texture Operation");
+ }
+
+ SDValue TexArgs[19] = {
+ DAG.getConstant(TextureOp, MVT::i32),
+ Op.getOperand(1),
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(2, MVT::i32),
+ DAG.getConstant(3, MVT::i32),
+ Op.getOperand(2),
+ Op.getOperand(3),
+ Op.getOperand(4),
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(2, MVT::i32),
+ DAG.getConstant(3, MVT::i32),
+ Op.getOperand(5),
+ Op.getOperand(6),
+ Op.getOperand(7),
+ Op.getOperand(8),
+ Op.getOperand(9),
+ Op.getOperand(10)
+ };
+ return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs, 19);
+ }
+ case AMDGPUIntrinsic::AMDGPU_dp4: {
+ SDValue Args[8] = {
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(0, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(0, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(1, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(1, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(2, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(2, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
+ DAG.getConstant(3, MVT::i32)),
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
+ DAG.getConstant(3, MVT::i32))
+ };
+ return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args, 8);
+ }
- case r600_read_ngroups_x:
+ case Intrinsic::r600_read_ngroups_x:
return LowerImplicitParameter(DAG, VT, DL, 0);
- case r600_read_ngroups_y:
+ case Intrinsic::r600_read_ngroups_y:
return LowerImplicitParameter(DAG, VT, DL, 1);
- case r600_read_ngroups_z:
+ case Intrinsic::r600_read_ngroups_z:
return LowerImplicitParameter(DAG, VT, DL, 2);
- case r600_read_global_size_x:
+ case Intrinsic::r600_read_global_size_x:
return LowerImplicitParameter(DAG, VT, DL, 3);
- case r600_read_global_size_y:
+ case Intrinsic::r600_read_global_size_y:
return LowerImplicitParameter(DAG, VT, DL, 4);
- case r600_read_global_size_z:
+ case Intrinsic::r600_read_global_size_z:
return LowerImplicitParameter(DAG, VT, DL, 5);
- case r600_read_local_size_x:
+ case Intrinsic::r600_read_local_size_x:
return LowerImplicitParameter(DAG, VT, DL, 6);
- case r600_read_local_size_y:
+ case Intrinsic::r600_read_local_size_y:
return LowerImplicitParameter(DAG, VT, DL, 7);
- case r600_read_local_size_z:
+ case Intrinsic::r600_read_local_size_z:
return LowerImplicitParameter(DAG, VT, DL, 8);
- case r600_read_tgid_x:
+ case Intrinsic::r600_read_tgid_x:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T1_X, VT);
- case r600_read_tgid_y:
+ case Intrinsic::r600_read_tgid_y:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T1_Y, VT);
- case r600_read_tgid_z:
+ case Intrinsic::r600_read_tgid_z:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T1_Z, VT);
- case r600_read_tidig_x:
+ case Intrinsic::r600_read_tidig_x:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_X, VT);
- case r600_read_tidig_y:
+ case Intrinsic::r600_read_tidig_y:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_Y, VT);
- case r600_read_tidig_z:
+ case Intrinsic::r600_read_tidig_z:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_Z, VT);
}
@@ -478,10 +781,41 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
}
}
+SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
+  // On hw >= R700, COS/SIN input must be between -1.0 and 1.0.
+  // Thus we lower them to TRIG(FRACT(x / 2Pi + 0.5) - 0.5).
+ EVT VT = Op.getValueType();
+ SDValue Arg = Op.getOperand(0);
+ SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT,
+ DAG.getNode(ISD::FADD, SDLoc(Op), VT,
+ DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg,
+ DAG.getConstantFP(0.15915494309, MVT::f32)),
+ DAG.getConstantFP(0.5, MVT::f32)));
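+  // Illustrative note (not part of the original patch): 0.15915494309 is
+  // 1/(2*pi), so FractPart computes FRACT(x/(2*pi) + 0.5), i.e. the angle
+  // expressed in revolutions and biased into [0, 1); subtracting 0.5 below
+  // recenters it into [-0.5, 0.5) for the R700+ COS/SIN hardware.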
+ unsigned TrigNode;
+ switch (Op.getOpcode()) {
+ case ISD::FCOS:
+ TrigNode = AMDGPUISD::COS_HW;
+ break;
+ case ISD::FSIN:
+ TrigNode = AMDGPUISD::SIN_HW;
+ break;
+ default:
+ llvm_unreachable("Wrong trig opcode");
+ }
+ SDValue TrigVal = DAG.getNode(TrigNode, SDLoc(Op), VT,
+ DAG.getNode(ISD::FADD, SDLoc(Op), VT, FractPart,
+ DAG.getConstantFP(-0.5, MVT::f32)));
+ if (Gen >= AMDGPUSubtarget::R700)
+ return TrigVal;
+ // On R600 hw, COS/SIN input must be between -Pi and Pi.
+ return DAG.getNode(ISD::FMUL, SDLoc(Op), VT, TrigVal,
+ DAG.getConstantFP(3.14159265359, MVT::f32));
+}
+
SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(
ISD::SETCC,
- Op.getDebugLoc(),
+ SDLoc(Op),
MVT::i1,
Op, DAG.getConstantFP(0.0f, MVT::f32),
DAG.getCondCode(ISD::SETNE)
@@ -489,11 +823,11 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
}
SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
- DebugLoc DL,
+ SDLoc DL,
unsigned DwordOffset) const {
unsigned ByteOffset = DwordOffset * 4;
PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
- AMDGPUAS::PARAM_I_ADDRESS);
+ AMDGPUAS::CONSTANT_BUFFER_0);
// We shouldn't be using an offset wider than 16-bits for implicit parameters.
assert(isInt<16>(ByteOffset));
@@ -504,32 +838,6 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
false, false, false, 0);
}
-SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL =
- static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());
-
- FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
- assert(FIN);
-
- unsigned FrameIndex = FIN->getIndex();
- unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
- return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32);
-}
-
-SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT VT = Op.getValueType();
-
- return DAG.getNode(AMDGPUISD::BITALIGN, DL, VT,
- Op.getOperand(0),
- Op.getOperand(0),
- DAG.getNode(ISD::SUB, DL, VT,
- DAG.getConstant(32, MVT::i32),
- Op.getOperand(1)));
-}
-
bool R600TargetLowering::isZero(SDValue Op) const {
if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
return Cst->isNullValue();
@@ -541,7 +849,7 @@ bool R600TargetLowering::isZero(SDValue Op) const {
}
SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
@@ -560,16 +868,27 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
//
// SET* can match the following patterns:
//
- // select_cc f32, f32, -1, 0, cc_any
- // select_cc f32, f32, 1.0f, 0.0f, cc_any
- // select_cc i32, i32, -1, 0, cc_any
+ // select_cc f32, f32, -1, 0, cc_supported
+ // select_cc f32, f32, 1.0f, 0.0f, cc_supported
+ // select_cc i32, i32, -1, 0, cc_supported
//
// Move hardware True/False values to the correct operand.
+ ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
+ ISD::CondCode InverseCC =
+ ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
if (isHWTrueValue(False) && isHWFalseValue(True)) {
- ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
- std::swap(False, True);
- CC = DAG.getCondCode(ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32));
+ if (isCondCodeLegal(InverseCC, CompareVT.getSimpleVT())) {
+ std::swap(False, True);
+ CC = DAG.getCondCode(InverseCC);
+ } else {
+ ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InverseCC);
+ if (isCondCodeLegal(SwapInvCC, CompareVT.getSimpleVT())) {
+ std::swap(False, True);
+ std::swap(LHS, RHS);
+ CC = DAG.getCondCode(SwapInvCC);
+ }
+ }
}
if (isHWTrueValue(True) && isHWFalseValue(False) &&
@@ -582,14 +901,34 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
//
// CND* can match the following patterns:
//
- // select_cc f32, 0.0, f32, f32, cc_any
- // select_cc f32, 0.0, i32, i32, cc_any
- // select_cc i32, 0, f32, f32, cc_any
- // select_cc i32, 0, i32, i32, cc_any
+ // select_cc f32, 0.0, f32, f32, cc_supported
+ // select_cc f32, 0.0, i32, i32, cc_supported
+ // select_cc i32, 0, f32, f32, cc_supported
+ // select_cc i32, 0, i32, i32, cc_supported
//
- if (isZero(LHS) || isZero(RHS)) {
- SDValue Cond = (isZero(LHS) ? RHS : LHS);
- SDValue Zero = (isZero(LHS) ? LHS : RHS);
+
+ // Try to move the zero value to the RHS
+ if (isZero(LHS)) {
+ ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
+ // Try swapping the operands
+ ISD::CondCode CCSwapped = ISD::getSetCCSwappedOperands(CCOpcode);
+ if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
+ std::swap(LHS, RHS);
+ CC = DAG.getCondCode(CCSwapped);
+ } else {
+      // Try inverting the condition and then swapping the operands
+ ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT.isInteger());
+ CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
+ if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
+ std::swap(True, False);
+ std::swap(LHS, RHS);
+ CC = DAG.getCondCode(CCSwapped);
+ }
+ }
+ }
+ if (isZero(RHS)) {
+ SDValue Cond = LHS;
+ SDValue Zero = RHS;
ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
if (CompareVT != VT) {
// Bitcast True / False to the correct types. This will end up being
@@ -599,20 +938,11 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
}
- if (isZero(LHS)) {
- CCOpcode = ISD::getSetCCSwappedOperands(CCOpcode);
- }
switch (CCOpcode) {
case ISD::SETONE:
case ISD::SETUNE:
case ISD::SETNE:
- case ISD::SETULE:
- case ISD::SETULT:
- case ISD::SETOLE:
- case ISD::SETOLT:
- case ISD::SETLE:
- case ISD::SETLT:
CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
Temp = True;
True = False;
@@ -660,17 +990,6 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
DAG.getCondCode(ISD::SETNE));
}
-SDValue R600TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
- return DAG.getNode(ISD::SELECT_CC,
- Op.getDebugLoc(),
- Op.getValueType(),
- Op.getOperand(0),
- DAG.getConstant(0, MVT::i32),
- Op.getOperand(1),
- Op.getOperand(2),
- DAG.getCondCode(ISD::SETNE));
-}
-
/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
/// 16 bytes (4 x 32-bit sub-registers), but we need to take into account the
@@ -693,7 +1012,7 @@ SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
default: llvm_unreachable("Invalid stack width");
}
- return DAG.getNode(ISD::SRL, Ptr.getDebugLoc(), Ptr.getValueType(), Ptr,
+ return DAG.getNode(ISD::SRL, SDLoc(Ptr), Ptr.getValueType(), Ptr,
DAG.getConstant(SRLPad, MVT::i32));
}
@@ -727,25 +1046,65 @@ void R600TargetLowering::getStackAddress(unsigned StackWidth,
}
SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
SDValue Chain = Op.getOperand(0);
SDValue Value = Op.getOperand(1);
SDValue Ptr = Op.getOperand(2);
- if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
- Ptr->getOpcode() != AMDGPUISD::DWORDADDR) {
- // Convert pointer from byte address to dword address.
- Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
- DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
- Ptr, DAG.getConstant(2, MVT::i32)));
+ SDValue Result = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
+ if (Result.getNode()) {
+ return Result;
+ }
- if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
- assert(!"Truncated and indexed stores not supported yet");
- } else {
- Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
+ if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS) {
+ if (StoreNode->isTruncatingStore()) {
+ EVT VT = Value.getValueType();
+ assert(VT.bitsLE(MVT::i32));
+ EVT MemVT = StoreNode->getMemoryVT();
+ SDValue MaskConstant;
+ if (MemVT == MVT::i8) {
+ MaskConstant = DAG.getConstant(0xFF, MVT::i32);
+ } else {
+ assert(MemVT == MVT::i16);
+ MaskConstant = DAG.getConstant(0xFFFF, MVT::i32);
+ }
+ SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, VT, Ptr,
+ DAG.getConstant(2, MVT::i32));
+ SDValue ByteIndex = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), Ptr,
+ DAG.getConstant(0x00000003, VT));
+ SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
+ SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
+ DAG.getConstant(3, VT));
+ SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, Shift);
+ SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, Shift);
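+      // Worked example (illustrative): storing the i8 value V to byte address
+      // A gives DWordAddr = A >> 2 and Shift = (A & 3) * 8, so ShiftedValue =
+      // (V & 0xFF) << Shift and Mask = 0xFF << Shift; the MSKOR node below
+      // writes ShiftedValue into the destination dword under Mask.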
+ // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
+ // vector instead.
+ SDValue Src[4] = {
+ ShiftedValue,
+ DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(0, MVT::i32),
+ Mask
+ };
+ SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src, 4);
+ SDValue Args[3] = { Chain, Input, DWordAddr };
+ return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
+ Op->getVTList(), Args, 3, MemVT,
+ StoreNode->getMemOperand());
+ } else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR &&
+ Value.getValueType().bitsGE(MVT::i32)) {
+ // Convert pointer from byte address to dword address.
+ Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
+ DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
+ Ptr, DAG.getConstant(2, MVT::i32)));
+
+ if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
+ assert(!"Truncated and indexed stores not supported yet");
+ } else {
+ Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
+ }
+ return Chain;
}
- return Chain;
}
EVT ValueVT = Value.getValueType();
@@ -789,7 +1148,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
}
Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
- DAG.getTargetConstant(0, MVT::i32)); // Channel
+ DAG.getTargetConstant(0, MVT::i32)); // Channel
}
return Chain;
@@ -839,18 +1198,28 @@ ConstantAddressBlock(unsigned AddressSpace) {
SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
{
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
SDValue Chain = Op.getOperand(0);
SDValue Ptr = Op.getOperand(1);
SDValue LoweredLoad;
+ if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
+ SDValue MergedValues[2] = {
+ SplitVectorLoad(Op, DAG),
+ Chain
+ };
+ return DAG.getMergeValues(MergedValues, 2, DL);
+ }
+
int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
- if (ConstantBlock > -1) {
+ if (ConstantBlock > -1 &&
+ ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
+ (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
SDValue Result;
- if (dyn_cast<ConstantExpr>(LoadNode->getSrcValue()) ||
- dyn_cast<Constant>(LoadNode->getSrcValue()) ||
- dyn_cast<ConstantSDNode>(Ptr)) {
+ if (isa<ConstantExpr>(LoadNode->getSrcValue()) ||
+ isa<Constant>(LoadNode->getSrcValue()) ||
+ isa<ConstantSDNode>(Ptr)) {
SDValue Slots[4];
for (unsigned i = 0; i < 4; i++) {
      // We want Const position encoded with the following formula:
@@ -862,13 +1231,19 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(4 * i + ConstantBlock * 16, MVT::i32));
Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
}
- Result = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Slots, 4);
+ EVT NewVT = MVT::v4i32;
+ unsigned NumElements = 4;
+ if (VT.isVector()) {
+ NewVT = VT;
+ NumElements = VT.getVectorNumElements();
+ }
+ Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT, Slots, NumElements);
} else {
// A non-constant ptr can't be folded; keep it as a v4f32 load
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(4, MVT::i32)),
DAG.getConstant(LoadNode->getAddressSpace() -
- AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
+ AMDGPUAS::CONSTANT_BUFFER_0, MVT::i32)
);
}
@@ -884,6 +1259,30 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
return DAG.getMergeValues(MergedValues, 2, DL);
}
+ // For most operations returning SDValue() will result in the node being
+ // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
+ // need to manually expand loads that may be legal in some address spaces and
+ // illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported for
+ // compute shaders, since the data is sign extended when it is uploaded to the
+  // buffer. However, SEXT loads from other address spaces are not supported,
+  // so we need to expand them here.
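+  //
+  // A sketch of the expansion for an i8 load into i32 (illustrative):
+  //   x = (extload i8)       ;; upper 24 bits undefined
+  //   x = (x shl 24) sra 24  ;; the arithmetic shift replicates the sign bit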
+ if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
+ EVT MemVT = LoadNode->getMemoryVT();
+ assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
+ SDValue ShiftAmount =
+ DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
+ SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
+ LoadNode->getPointerInfo(), MemVT,
+ LoadNode->isVolatile(),
+ LoadNode->isNonTemporal(),
+ LoadNode->getAlignment());
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, NewLoad, ShiftAmount);
+ SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Shl, ShiftAmount);
+
+ SDValue MergedValues[2] = { Sra, Chain };
+ return DAG.getMergeValues(MergedValues, 2, DL);
+ }
+
if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
return SDValue();
}
@@ -941,42 +1340,158 @@ SDValue R600TargetLowering::LowerFormalArguments(
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
- unsigned ParamOffsetBytes = 36;
- Function::const_arg_iterator FuncArg =
- DAG.getMachineFunction().getFunction()->arg_begin();
- for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) {
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+ MachineFunction &MF = DAG.getMachineFunction();
+ unsigned ShaderType = MF.getInfo<R600MachineFunctionInfo>()->ShaderType;
+
+ SmallVector<ISD::InputArg, 8> LocalIns;
+
+ getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
+ LocalIns);
+
+ AnalyzeFormalArguments(CCInfo, LocalIns);
+
+ for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
EVT VT = Ins[i].VT;
- Type *ArgType = FuncArg->getType();
- unsigned ArgSizeInBits = ArgType->isPointerTy() ?
- 32 : ArgType->getPrimitiveSizeInBits();
- unsigned ArgBytes = ArgSizeInBits >> 3;
- EVT ArgVT;
- if (ArgSizeInBits < VT.getSizeInBits()) {
- assert(!ArgType->isFloatTy() &&
- "Extending floating point arguments not supported yet");
- ArgVT = MVT::getIntegerVT(ArgSizeInBits);
- } else {
- ArgVT = VT;
+ EVT MemVT = LocalIns[i].VT;
+
+ if (ShaderType != ShaderType::COMPUTE) {
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), &AMDGPU::R600_Reg128RegClass);
+ SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
+ InVals.push_back(Register);
+ continue;
}
+
PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
- AMDGPUAS::PARAM_I_ADDRESS);
- SDValue Arg = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getRoot(),
- DAG.getConstant(ParamOffsetBytes, MVT::i32),
- MachinePointerInfo(UndefValue::get(PtrTy)),
- ArgVT, false, false, ArgBytes);
+ AMDGPUAS::CONSTANT_BUFFER_0);
+
+    // The first 36 bytes of the input buffer contain information about the
+    // thread group and global sizes.
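+    // Consequently, an argument assigned LocMemOffset N by the calling
+    // convention is loaded from byte offset 36 + N.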
+ SDValue Arg = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain,
+ DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
+ MachinePointerInfo(UndefValue::get(PtrTy)),
+ MemVT, false, false, 4);
+    // 4 is the preferred alignment for the CONSTANT memory space.
InVals.push_back(Arg);
- ParamOffsetBytes += ArgBytes;
}
return Chain;
}
-EVT R600TargetLowering::getSetCCResultType(EVT VT) const {
+EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector()) return MVT::i32;
return VT.changeVectorElementTypeToInteger();
}
+static SDValue
+CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
+ DenseMap<unsigned, unsigned> &RemapSwizzle) {
+ assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
+ assert(RemapSwizzle.empty());
+ SDValue NewBldVec[4] = {
+ VectorEntry.getOperand(0),
+ VectorEntry.getOperand(1),
+ VectorEntry.getOperand(2),
+ VectorEntry.getOperand(3)
+ };
+
+ for (unsigned i = 0; i < 4; i++) {
+ if (NewBldVec[i].getOpcode() == ISD::UNDEF)
+      // We mask the write here to teach later passes that the ith element of
+      // this vector is undef. Thus we can use it to reduce 128-bit register
+      // usage, break false dependencies and additionally make the assembly
+      // easier to read.
+ RemapSwizzle[i] = 7; // SEL_MASK_WRITE
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
+ if (C->isZero()) {
+ RemapSwizzle[i] = 4; // SEL_0
+ NewBldVec[i] = DAG.getUNDEF(MVT::f32);
+ } else if (C->isExactlyValue(1.0)) {
+ RemapSwizzle[i] = 5; // SEL_1
+ NewBldVec[i] = DAG.getUNDEF(MVT::f32);
+ }
+ }
+
+ if (NewBldVec[i].getOpcode() == ISD::UNDEF)
+ continue;
+ for (unsigned j = 0; j < i; j++) {
+ if (NewBldVec[i] == NewBldVec[j]) {
+ NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
+ RemapSwizzle[i] = j;
+ break;
+ }
+ }
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
+ VectorEntry.getValueType(), NewBldVec, 4);
+}
+
+static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
+ DenseMap<unsigned, unsigned> &RemapSwizzle) {
+ assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
+ assert(RemapSwizzle.empty());
+ SDValue NewBldVec[4] = {
+ VectorEntry.getOperand(0),
+ VectorEntry.getOperand(1),
+ VectorEntry.getOperand(2),
+ VectorEntry.getOperand(3)
+ };
+ bool isUnmovable[4] = { false, false, false, false };
+ for (unsigned i = 0; i < 4; i++)
+ RemapSwizzle[i] = i;
+
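+  // The loop below tries to move an EXTRACT_VECTOR_ELT operand into the slot
+  // matching the lane it reads (at most one move per call), recording the
+  // exchange in RemapSwizzle.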
+ for (unsigned i = 0; i < 4; i++) {
+ if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+      unsigned Idx = cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
+ ->getZExtValue();
+ if (i == Idx) {
+ isUnmovable[Idx] = true;
+ continue;
+ }
+ if (isUnmovable[Idx])
+ continue;
+ // Swap i and Idx
+ std::swap(NewBldVec[Idx], NewBldVec[i]);
+ std::swap(RemapSwizzle[i], RemapSwizzle[Idx]);
+ break;
+ }
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
+ VectorEntry.getValueType(), NewBldVec, 4);
+}
+
+SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[4],
+                                            SelectionDAG &DAG) const {
+ assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
+ // Old -> New swizzle values
+ DenseMap<unsigned, unsigned> SwizzleRemap;
+
+ BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
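+  // Illustration (not part of the original change): if CompactSwizzlableVector
+  // rewrote element 2 into the constant selector SEL_0 (4), SwizzleRemap holds
+  // {2 -> 4} and any swizzle operand that selected element 2 is patched below.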
+ for (unsigned i = 0; i < 4; i++) {
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
+ Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+ }
+
+ SwizzleRemap.clear();
+ BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
+ for (unsigned i = 0; i < 4; i++) {
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
+ if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
+ Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
+ }
+
+ return BuildVector;
+}
+
//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//
@@ -990,7 +1505,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::FP_ROUND: {
SDValue Arg = N->getOperand(0);
if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
- return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), N->getValueType(0),
Arg.getOperand(0));
}
break;
@@ -1015,7 +1530,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
- return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N->getValueType(0),
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N->getValueType(0),
SelectCC.getOperand(0), // LHS
SelectCC.getOperand(1), // RHS
DAG.getConstant(-1, MVT::i32), // True
@@ -1024,6 +1539,61 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
break;
}
+
+  // insert_vector_elt (build_vector elt0, ..., eltN), InVal, Idx
+  // => build_vector elt0, ..., InVal, ..., eltN
+ case ISD::INSERT_VECTOR_ELT: {
+ SDValue InVec = N->getOperand(0);
+ SDValue InVal = N->getOperand(1);
+ SDValue EltNo = N->getOperand(2);
+ SDLoc dl(N);
+
+ // If the inserted element is an UNDEF, just use the input vector.
+ if (InVal.getOpcode() == ISD::UNDEF)
+ return InVec;
+
+ EVT VT = InVec.getValueType();
+
+ // If we can't generate a legal BUILD_VECTOR, exit
+ if (!isOperationLegal(ISD::BUILD_VECTOR, VT))
+ return SDValue();
+
+ // Check that we know which element is being inserted
+ if (!isa<ConstantSDNode>(EltNo))
+ return SDValue();
+ unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+
+ // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
+ // be converted to a BUILD_VECTOR). Fill in the Ops vector with the
+ // vector elements.
+ SmallVector<SDValue, 8> Ops;
+ if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
+ Ops.append(InVec.getNode()->op_begin(),
+ InVec.getNode()->op_end());
+ } else if (InVec.getOpcode() == ISD::UNDEF) {
+ unsigned NElts = VT.getVectorNumElements();
+ Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
+ } else {
+ return SDValue();
+ }
+
+ // Insert the element
+ if (Elt < Ops.size()) {
+ // All the operands of BUILD_VECTOR must have the same type;
+ // we enforce that here.
+ EVT OpVT = Ops[0].getValueType();
+ if (InVal.getValueType() != OpVT)
+ InVal = OpVT.bitsGT(InVal.getValueType()) ?
+ DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
+ DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
+ Ops[Elt] = InVal;
+ }
+
+ // Return the new vector
+ return DAG.getNode(ISD::BUILD_VECTOR, dl,
+ VT, &Ops[0], Ops.size());
+ }
+
// Extract_vec (Build_vector) generated by custom lowering
// also needs to be customly combined
case ISD::EXTRACT_VECTOR_ELT: {
@@ -1038,7 +1608,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
unsigned Element = Const->getZExtValue();
- return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getVTList(),
+ return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getVTList(),
Arg->getOperand(0).getOperand(Element));
}
}
@@ -1073,25 +1643,25 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
LHSCC = ISD::getSetCCInverse(LHSCC,
LHS.getOperand(0).getValueType().isInteger());
- return DAG.getSelectCC(N->getDebugLoc(),
- LHS.getOperand(0),
- LHS.getOperand(1),
- LHS.getOperand(2),
- LHS.getOperand(3),
- LHSCC);
+ if (DCI.isBeforeLegalizeOps() ||
+ isCondCodeLegal(LHSCC, LHS.getOperand(0).getSimpleValueType()))
+ return DAG.getSelectCC(SDLoc(N),
+ LHS.getOperand(0),
+ LHS.getOperand(1),
+ LHS.getOperand(2),
+ LHS.getOperand(3),
+ LHSCC);
+ break;
}
}
+ return SDValue();
}
+
case AMDGPUISD::EXPORT: {
SDValue Arg = N->getOperand(1);
if (Arg.getOpcode() != ISD::BUILD_VECTOR)
break;
- SDValue NewBldVec[4] = {
- DAG.getUNDEF(MVT::f32),
- DAG.getUNDEF(MVT::f32),
- DAG.getUNDEF(MVT::f32),
- DAG.getUNDEF(MVT::f32)
- };
+
SDValue NewArgs[8] = {
N->getOperand(0), // Chain
SDValue(),
@@ -1102,23 +1672,290 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
N->getOperand(6), // SWZ_Z
N->getOperand(7) // SWZ_W
};
- for (unsigned i = 0; i < Arg.getNumOperands(); i++) {
- if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg.getOperand(i))) {
- if (C->isZero()) {
- NewArgs[4 + i] = DAG.getConstant(4, MVT::i32); // SEL_0
- } else if (C->isExactlyValue(1.0)) {
- NewArgs[4 + i] = DAG.getConstant(5, MVT::i32); // SEL_0
- } else {
- NewBldVec[i] = Arg.getOperand(i);
+ SDLoc DL(N);
+ NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG);
+ return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
+ }
+ case AMDGPUISD::TEXTURE_FETCH: {
+ SDValue Arg = N->getOperand(1);
+ if (Arg.getOpcode() != ISD::BUILD_VECTOR)
+ break;
+
+ SDValue NewArgs[19] = {
+ N->getOperand(0),
+ N->getOperand(1),
+ N->getOperand(2),
+ N->getOperand(3),
+ N->getOperand(4),
+ N->getOperand(5),
+ N->getOperand(6),
+ N->getOperand(7),
+ N->getOperand(8),
+ N->getOperand(9),
+ N->getOperand(10),
+ N->getOperand(11),
+ N->getOperand(12),
+ N->getOperand(13),
+ N->getOperand(14),
+ N->getOperand(15),
+ N->getOperand(16),
+ N->getOperand(17),
+ N->getOperand(18),
+ };
+ NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG);
+ return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, SDLoc(N), N->getVTList(),
+ NewArgs, 19);
+ }
+ }
+ return SDValue();
+}
+
+static bool
+FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
+ SDValue &Abs, SDValue &Sel, SDValue &Imm, SelectionDAG &DAG) {
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo *>(DAG.getTarget().getInstrInfo());
+ if (!Src.isMachineOpcode())
+ return false;
+ switch (Src.getMachineOpcode()) {
+ case AMDGPU::FNEG_R600:
+ if (!Neg.getNode())
+ return false;
+ Src = Src.getOperand(0);
+ Neg = DAG.getTargetConstant(1, MVT::i32);
+ return true;
+ case AMDGPU::FABS_R600:
+ if (!Abs.getNode())
+ return false;
+ Src = Src.getOperand(0);
+ Abs = DAG.getTargetConstant(1, MVT::i32);
+ return true;
+ case AMDGPU::CONST_COPY: {
+ unsigned Opcode = ParentNode->getMachineOpcode();
+ bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+
+ if (!Sel.getNode())
+ return false;
+
+ SDValue CstOffset = Src.getOperand(0);
+ if (ParentNode->getValueType(0).isVector())
+ return false;
+
+    // Gather constant values
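+    // (The hardware only allows a limited set of distinct constant reads per
+    // ALU group; fitsConstReadLimitations below checks the candidate set.)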
+ int SrcIndices[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+ };
+ std::vector<unsigned> Consts;
+ for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
+ int OtherSrcIdx = SrcIndices[i];
+ int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
+ if (OtherSrcIdx < 0 || OtherSelIdx < 0)
+ continue;
+ if (HasDst) {
+ OtherSrcIdx--;
+ OtherSelIdx--;
+ }
+ if (RegisterSDNode *Reg =
+ dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
+ if (Reg->getReg() == AMDGPU::ALU_CONST) {
+          ConstantSDNode *Cst = cast<ConstantSDNode>(
+ ParentNode->getOperand(OtherSelIdx));
+ Consts.push_back(Cst->getZExtValue());
}
+ }
+ }
+
+  ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
+ Consts.push_back(Cst->getZExtValue());
+ if (!TII->fitsConstReadLimitations(Consts)) {
+ return false;
+ }
+
+ Sel = CstOffset;
+ Src = DAG.getRegister(AMDGPU::ALU_CONST, MVT::f32);
+ return true;
+ }
+ case AMDGPU::MOV_IMM_I32:
+ case AMDGPU::MOV_IMM_F32: {
+ unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
+ uint64_t ImmValue = 0;
+
+ if (Src.getMachineOpcode() == AMDGPU::MOV_IMM_F32) {
+      ConstantFPSDNode *FPC = cast<ConstantFPSDNode>(Src.getOperand(0));
+ float FloatValue = FPC->getValueAPF().convertToFloat();
+ if (FloatValue == 0.0) {
+ ImmReg = AMDGPU::ZERO;
+ } else if (FloatValue == 0.5) {
+ ImmReg = AMDGPU::HALF;
+ } else if (FloatValue == 1.0) {
+ ImmReg = AMDGPU::ONE;
+ } else {
+ ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
+ }
+ } else {
+      ConstantSDNode *C = cast<ConstantSDNode>(Src.getOperand(0));
+ uint64_t Value = C->getZExtValue();
+ if (Value == 0) {
+ ImmReg = AMDGPU::ZERO;
+ } else if (Value == 1) {
+ ImmReg = AMDGPU::ONE_INT;
} else {
- NewBldVec[i] = Arg.getOperand(i);
+ ImmValue = Value;
}
}
- DebugLoc DL = N->getDebugLoc();
- NewArgs[1] = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4f32, NewBldVec, 4);
- return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs, 8);
+
+ // Check that we aren't already using an immediate.
+ // XXX: It's possible for an instruction to have more than one
+ // immediate operand, but this is not supported yet.
+ if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+ if (!Imm.getNode())
+ return false;
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Imm);
+ assert(C);
+ if (C->getZExtValue())
+ return false;
+ Imm = DAG.getTargetConstant(ImmValue, MVT::i32);
+ }
+ Src = DAG.getRegister(ImmReg, MVT::i32);
+ return true;
}
+ default:
+ return false;
}
- return SDValue();
+}
+
+/// \brief Fold the instructions after selecting them
+SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
+ SelectionDAG &DAG) const {
+ const R600InstrInfo *TII =
+ static_cast<const R600InstrInfo *>(DAG.getTarget().getInstrInfo());
+ if (!Node->isMachineOpcode())
+ return Node;
+ unsigned Opcode = Node->getMachineOpcode();
+ SDValue FakeOp;
+
+ std::vector<SDValue> Ops;
+  for (SDNode::op_iterator I = Node->op_begin(), E = Node->op_end();
+ I != E; ++I)
+ Ops.push_back(*I);
+
+ if (Opcode == AMDGPU::DOT_4) {
+ int OperandIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+ };
+ int NegIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
+ };
+ int AbsIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
+ };
+ for (unsigned i = 0; i < 8; i++) {
+ if (OperandIdx[i] < 0)
+ return Node;
+ SDValue &Src = Ops[OperandIdx[i] - 1];
+ SDValue &Neg = Ops[NegIdx[i] - 1];
+ SDValue &Abs = Ops[AbsIdx[i] - 1];
+ bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+ int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
+ if (HasDst)
+ SelIdx--;
+ SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
+ if (FoldOperand(Node, i, Src, Neg, Abs, Sel, FakeOp, DAG))
+ return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ }
+ } else if (Opcode == AMDGPU::REG_SEQUENCE) {
+ for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) {
+ SDValue &Src = Ops[i];
+ if (FoldOperand(Node, i, Src, FakeOp, FakeOp, FakeOp, FakeOp, DAG))
+ return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ }
+ } else if (Opcode == AMDGPU::CLAMP_R600) {
+ SDValue Src = Node->getOperand(0);
+ if (!Src.isMachineOpcode() ||
+ !TII->hasInstrModifiers(Src.getMachineOpcode()))
+ return Node;
+ int ClampIdx = TII->getOperandIdx(Src.getMachineOpcode(),
+ AMDGPU::OpName::clamp);
+ if (ClampIdx < 0)
+ return Node;
+ std::vector<SDValue> Ops;
+ unsigned NumOp = Src.getNumOperands();
+    for (unsigned i = 0; i < NumOp; ++i)
+ Ops.push_back(Src.getOperand(i));
+ Ops[ClampIdx - 1] = DAG.getTargetConstant(1, MVT::i32);
+ return DAG.getMachineNode(Src.getMachineOpcode(), SDLoc(Node),
+ Node->getVTList(), Ops);
+ } else {
+ if (!TII->hasInstrModifiers(Opcode))
+ return Node;
+ int OperandIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
+ };
+ int NegIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
+ };
+ int AbsIdx[] = {
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
+ TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
+ -1
+ };
+ for (unsigned i = 0; i < 3; i++) {
+ if (OperandIdx[i] < 0)
+ return Node;
+ SDValue &Src = Ops[OperandIdx[i] - 1];
+ SDValue &Neg = Ops[NegIdx[i] - 1];
+ SDValue FakeAbs;
+ SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
+ bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+ int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
+ int ImmIdx = TII->getOperandIdx(Opcode, AMDGPU::OpName::literal);
+ if (HasDst) {
+ SelIdx--;
+ ImmIdx--;
+ }
+ SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
+ SDValue &Imm = Ops[ImmIdx];
+ if (FoldOperand(Node, i, Src, Neg, Abs, Sel, Imm, DAG))
+ return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
+ }
+ }
+
+ return Node;
}
diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h
index 2c09acb..c10257e 100644
--- a/lib/Target/R600/R600ISelLowering.h
+++ b/lib/Target/R600/R600ISelLowering.h
@@ -36,37 +36,37 @@ public:
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &, EVT VT) const;
private:
- const R600InstrInfo * TII;
-
+ unsigned Gen;
/// Each OpenCL kernel has nine implicit parameters that are stored in the
/// first nine dwords of a Vertex Buffer. These implicit parameters are
/// lowered to load instructions which retrieve the values from the Vertex
/// Buffer.
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
- DebugLoc DL, unsigned DwordOffset) const;
+ SDLoc DL, unsigned DwordOffset) const;
void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
MachineRegisterInfo & MRI, unsigned dword_offset) const;
+ SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG) const;
/// \brief Lower ROTL opcode to BITALIGN
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
SDValue stackPtrToRegIndex(SDValue Ptr, unsigned StackWidth,
SelectionDAG &DAG) const;
void getStackAddress(unsigned StackWidth, unsigned ElemIdx,
unsigned &Channel, unsigned &PtrIncr) const;
bool isZero(SDValue Op) const;
+ virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
};
} // End namespace llvm;
diff --git a/lib/Target/R600/R600InstrFormats.td b/lib/Target/R600/R600InstrFormats.td
new file mode 100644
index 0000000..9428bab
--- /dev/null
+++ b/lib/Target/R600/R600InstrFormats.td
@@ -0,0 +1,492 @@
+//===-- R600InstrFormats.td - R600 Instruction Encodings ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// R600 Instruction format definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
+ InstrItinClass itin>
+ : AMDGPUInst <outs, ins, asm, pattern> {
+
+ field bits<64> Inst;
+ bit Trig = 0;
+ bit Op3 = 0;
+ bit isVector = 0;
+ bits<2> FlagOperandIdx = 0;
+ bit Op1 = 0;
+ bit Op2 = 0;
+ bit LDS_1A = 0;
+ bit LDS_1A1D = 0;
+ bit HasNativeOperands = 0;
+ bit VTXInst = 0;
+ bit TEXInst = 0;
+ bit ALUInst = 0;
+ bit IsExport = 0;
+ bit LDS_1A2D = 0;
+
+ let Namespace = "AMDGPU";
+ let OutOperandList = outs;
+ let InOperandList = ins;
+ let AsmString = asm;
+ let Pattern = pattern;
+ let Itinerary = itin;
+
+ let TSFlags{4} = Trig;
+ let TSFlags{5} = Op3;
+
+ // Vector instructions are instructions that must fill all slots in an
+ // instruction group
+ let TSFlags{6} = isVector;
+ let TSFlags{8-7} = FlagOperandIdx;
+ let TSFlags{9} = HasNativeOperands;
+ let TSFlags{10} = Op1;
+ let TSFlags{11} = Op2;
+ let TSFlags{12} = VTXInst;
+ let TSFlags{13} = TEXInst;
+ let TSFlags{14} = ALUInst;
+ let TSFlags{15} = LDS_1A;
+ let TSFlags{16} = LDS_1A1D;
+ let TSFlags{17} = IsExport;
+ let TSFlags{18} = LDS_1A2D;
+}
+
+//===----------------------------------------------------------------------===//
+// ALU instructions
+//===----------------------------------------------------------------------===//
+
+class R600_ALU_LDS_Word0 {
+ field bits<32> Word0;
+
+ bits<11> src0;
+ bits<1> src0_rel;
+ bits<11> src1;
+ bits<1> src1_rel;
+ bits<3> index_mode = 0;
+ bits<2> pred_sel;
+ bits<1> last;
+
+ bits<9> src0_sel = src0{8-0};
+ bits<2> src0_chan = src0{10-9};
+ bits<9> src1_sel = src1{8-0};
+ bits<2> src1_chan = src1{10-9};
+
+ let Word0{8-0} = src0_sel;
+ let Word0{9} = src0_rel;
+ let Word0{11-10} = src0_chan;
+ let Word0{21-13} = src1_sel;
+ let Word0{22} = src1_rel;
+ let Word0{24-23} = src1_chan;
+ let Word0{28-26} = index_mode;
+ let Word0{30-29} = pred_sel;
+ let Word0{31} = last;
+}
+
+class R600ALU_Word0 : R600_ALU_LDS_Word0 {
+
+ bits<1> src0_neg;
+ bits<1> src1_neg;
+
+ let Word0{12} = src0_neg;
+ let Word0{25} = src1_neg;
+}
+
+class R600ALU_Word1 {
+ field bits<32> Word1;
+
+ bits<11> dst;
+ bits<3> bank_swizzle;
+ bits<1> dst_rel;
+ bits<1> clamp;
+
+ bits<7> dst_sel = dst{6-0};
+ bits<2> dst_chan = dst{10-9};
+
+ let Word1{20-18} = bank_swizzle;
+ let Word1{27-21} = dst_sel;
+ let Word1{28} = dst_rel;
+ let Word1{30-29} = dst_chan;
+ let Word1{31} = clamp;
+}
+
+class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{
+
+ bits<1> src0_abs;
+ bits<1> src1_abs;
+ bits<1> update_exec_mask;
+ bits<1> update_pred;
+ bits<1> write;
+ bits<2> omod;
+
+ let Word1{0} = src0_abs;
+ let Word1{1} = src1_abs;
+ let Word1{2} = update_exec_mask;
+ let Word1{3} = update_pred;
+ let Word1{4} = write;
+ let Word1{6-5} = omod;
+ let Word1{17-7} = alu_inst;
+}
+
+class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{
+
+ bits<11> src2;
+ bits<1> src2_rel;
+ bits<1> src2_neg;
+
+ bits<9> src2_sel = src2{8-0};
+ bits<2> src2_chan = src2{10-9};
+
+ let Word1{8-0} = src2_sel;
+ let Word1{9} = src2_rel;
+ let Word1{11-10} = src2_chan;
+ let Word1{12} = src2_neg;
+ let Word1{17-13} = alu_inst;
+}
+
+class R600LDS_Word1 {
+ field bits<32> Word1;
+
+ bits<11> src2;
+ bits<9> src2_sel = src2{8-0};
+ bits<2> src2_chan = src2{10-9};
+ bits<1> src2_rel;
+ // offset specifies the stride offset to the second set of data to be read
+ // from. This is a dword offset.
+ bits<5> alu_inst = 17; // OP3_INST_LDS_IDX_OP
+ bits<3> bank_swizzle;
+ bits<6> lds_op;
+ bits<2> dst_chan = 0;
+
+ let Word1{8-0} = src2_sel;
+ let Word1{9} = src2_rel;
+ let Word1{11-10} = src2_chan;
+ let Word1{17-13} = alu_inst;
+ let Word1{20-18} = bank_swizzle;
+ let Word1{26-21} = lds_op;
+ let Word1{30-29} = dst_chan;
+}
+
+
+/*
+XXX: R600 subtarget uses a slightly different encoding than the other
+subtargets. We currently handle this in R600MCCodeEmitter, but we may
+want to use these instruction classes in the future.
+
+class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 {
+
+ bits<1> fog_merge;
+ bits<10> alu_inst;
+
+ let Inst{37} = fog_merge;
+ let Inst{39-38} = omod;
+ let Inst{49-40} = alu_inst;
+}
+
+class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 {
+
+ bits<11> alu_inst;
+
+ let Inst{38-37} = omod;
+ let Inst{49-39} = alu_inst;
+}
+*/
+
+//===----------------------------------------------------------------------===//
+// Vertex Fetch instructions
+//===----------------------------------------------------------------------===//
+
+class VTX_WORD0 {
+ field bits<32> Word0;
+ bits<7> src_gpr;
+ bits<5> VC_INST;
+ bits<2> FETCH_TYPE;
+ bits<1> FETCH_WHOLE_QUAD;
+ bits<8> BUFFER_ID;
+ bits<1> SRC_REL;
+ bits<2> SRC_SEL_X;
+
+ let Word0{4-0} = VC_INST;
+ let Word0{6-5} = FETCH_TYPE;
+ let Word0{7} = FETCH_WHOLE_QUAD;
+ let Word0{15-8} = BUFFER_ID;
+ let Word0{22-16} = src_gpr;
+ let Word0{23} = SRC_REL;
+ let Word0{25-24} = SRC_SEL_X;
+}
+
+class VTX_WORD0_eg : VTX_WORD0 {
+
+ bits<6> MEGA_FETCH_COUNT;
+
+ let Word0{31-26} = MEGA_FETCH_COUNT;
+}
+
+class VTX_WORD0_cm : VTX_WORD0 {
+
+ bits<2> SRC_SEL_Y;
+ bits<2> STRUCTURED_READ;
+ bits<1> LDS_REQ;
+ bits<1> COALESCED_READ;
+
+ let Word0{27-26} = SRC_SEL_Y;
+ let Word0{29-28} = STRUCTURED_READ;
+ let Word0{30} = LDS_REQ;
+ let Word0{31} = COALESCED_READ;
+}
+
+class VTX_WORD1_GPR {
+ field bits<32> Word1;
+ bits<7> dst_gpr;
+ bits<1> DST_REL;
+ bits<3> DST_SEL_X;
+ bits<3> DST_SEL_Y;
+ bits<3> DST_SEL_Z;
+ bits<3> DST_SEL_W;
+ bits<1> USE_CONST_FIELDS;
+ bits<6> DATA_FORMAT;
+ bits<2> NUM_FORMAT_ALL;
+ bits<1> FORMAT_COMP_ALL;
+ bits<1> SRF_MODE_ALL;
+
+ let Word1{6-0} = dst_gpr;
+ let Word1{7} = DST_REL;
+ let Word1{8} = 0; // Reserved
+ let Word1{11-9} = DST_SEL_X;
+ let Word1{14-12} = DST_SEL_Y;
+ let Word1{17-15} = DST_SEL_Z;
+ let Word1{20-18} = DST_SEL_W;
+ let Word1{21} = USE_CONST_FIELDS;
+ let Word1{27-22} = DATA_FORMAT;
+ let Word1{29-28} = NUM_FORMAT_ALL;
+ let Word1{30} = FORMAT_COMP_ALL;
+ let Word1{31} = SRF_MODE_ALL;
+}
+
+//===----------------------------------------------------------------------===//
+// Texture fetch instructions
+//===----------------------------------------------------------------------===//
+
+class TEX_WORD0 {
+ field bits<32> Word0;
+
+ bits<5> TEX_INST;
+ bits<2> INST_MOD;
+ bits<1> FETCH_WHOLE_QUAD;
+ bits<8> RESOURCE_ID;
+ bits<7> SRC_GPR;
+ bits<1> SRC_REL;
+ bits<1> ALT_CONST;
+ bits<2> RESOURCE_INDEX_MODE;
+ bits<2> SAMPLER_INDEX_MODE;
+
+ let Word0{4-0} = TEX_INST;
+ let Word0{6-5} = INST_MOD;
+ let Word0{7} = FETCH_WHOLE_QUAD;
+ let Word0{15-8} = RESOURCE_ID;
+ let Word0{22-16} = SRC_GPR;
+ let Word0{23} = SRC_REL;
+ let Word0{24} = ALT_CONST;
+ let Word0{26-25} = RESOURCE_INDEX_MODE;
+ let Word0{28-27} = SAMPLER_INDEX_MODE;
+}
+
+class TEX_WORD1 {
+ field bits<32> Word1;
+
+ bits<7> DST_GPR;
+ bits<1> DST_REL;
+ bits<3> DST_SEL_X;
+ bits<3> DST_SEL_Y;
+ bits<3> DST_SEL_Z;
+ bits<3> DST_SEL_W;
+ bits<7> LOD_BIAS;
+ bits<1> COORD_TYPE_X;
+ bits<1> COORD_TYPE_Y;
+ bits<1> COORD_TYPE_Z;
+ bits<1> COORD_TYPE_W;
+
+ let Word1{6-0} = DST_GPR;
+ let Word1{7} = DST_REL;
+ let Word1{11-9} = DST_SEL_X;
+ let Word1{14-12} = DST_SEL_Y;
+ let Word1{17-15} = DST_SEL_Z;
+ let Word1{20-18} = DST_SEL_W;
+ let Word1{27-21} = LOD_BIAS;
+ let Word1{28} = COORD_TYPE_X;
+ let Word1{29} = COORD_TYPE_Y;
+ let Word1{30} = COORD_TYPE_Z;
+ let Word1{31} = COORD_TYPE_W;
+}
+
+class TEX_WORD2 {
+ field bits<32> Word2;
+
+ bits<5> OFFSET_X;
+ bits<5> OFFSET_Y;
+ bits<5> OFFSET_Z;
+ bits<5> SAMPLER_ID;
+ bits<3> SRC_SEL_X;
+ bits<3> SRC_SEL_Y;
+ bits<3> SRC_SEL_Z;
+ bits<3> SRC_SEL_W;
+
+ let Word2{4-0} = OFFSET_X;
+ let Word2{9-5} = OFFSET_Y;
+ let Word2{14-10} = OFFSET_Z;
+ let Word2{19-15} = SAMPLER_ID;
+ let Word2{22-20} = SRC_SEL_X;
+ let Word2{25-23} = SRC_SEL_Y;
+ let Word2{28-26} = SRC_SEL_Z;
+ let Word2{31-29} = SRC_SEL_W;
+}
+
+//===----------------------------------------------------------------------===//
+// Control Flow Instructions
+//===----------------------------------------------------------------------===//
+
+class CF_WORD1_R600 {
+ field bits<32> Word1;
+
+ bits<3> POP_COUNT;
+ bits<5> CF_CONST;
+ bits<2> COND;
+ bits<3> COUNT;
+ bits<6> CALL_COUNT;
+ bits<1> COUNT_3;
+ bits<1> END_OF_PROGRAM;
+ bits<1> VALID_PIXEL_MODE;
+ bits<7> CF_INST;
+ bits<1> WHOLE_QUAD_MODE;
+ bits<1> BARRIER;
+
+ let Word1{2-0} = POP_COUNT;
+ let Word1{7-3} = CF_CONST;
+ let Word1{9-8} = COND;
+ let Word1{12-10} = COUNT;
+ let Word1{18-13} = CALL_COUNT;
+ let Word1{19} = COUNT_3;
+ let Word1{21} = END_OF_PROGRAM;
+ let Word1{22} = VALID_PIXEL_MODE;
+ let Word1{29-23} = CF_INST;
+ let Word1{30} = WHOLE_QUAD_MODE;
+ let Word1{31} = BARRIER;
+}
+
+class CF_WORD0_EG {
+ field bits<32> Word0;
+
+ bits<24> ADDR;
+ bits<3> JUMPTABLE_SEL;
+
+ let Word0{23-0} = ADDR;
+ let Word0{26-24} = JUMPTABLE_SEL;
+}
+
+class CF_WORD1_EG {
+ field bits<32> Word1;
+
+ bits<3> POP_COUNT;
+ bits<5> CF_CONST;
+ bits<2> COND;
+ bits<6> COUNT;
+ bits<1> VALID_PIXEL_MODE;
+ bits<1> END_OF_PROGRAM;
+ bits<8> CF_INST;
+ bits<1> BARRIER;
+
+ let Word1{2-0} = POP_COUNT;
+ let Word1{7-3} = CF_CONST;
+ let Word1{9-8} = COND;
+ let Word1{15-10} = COUNT;
+ let Word1{20} = VALID_PIXEL_MODE;
+ let Word1{21} = END_OF_PROGRAM;
+ let Word1{29-22} = CF_INST;
+ let Word1{31} = BARRIER;
+}
+
+class CF_ALU_WORD0 {
+ field bits<32> Word0;
+
+ bits<22> ADDR;
+ bits<4> KCACHE_BANK0;
+ bits<4> KCACHE_BANK1;
+ bits<2> KCACHE_MODE0;
+
+ let Word0{21-0} = ADDR;
+ let Word0{25-22} = KCACHE_BANK0;
+ let Word0{29-26} = KCACHE_BANK1;
+ let Word0{31-30} = KCACHE_MODE0;
+}
+
+class CF_ALU_WORD1 {
+ field bits<32> Word1;
+
+ bits<2> KCACHE_MODE1;
+ bits<8> KCACHE_ADDR0;
+ bits<8> KCACHE_ADDR1;
+ bits<7> COUNT;
+ bits<1> ALT_CONST;
+ bits<4> CF_INST;
+ bits<1> WHOLE_QUAD_MODE;
+ bits<1> BARRIER;
+
+ let Word1{1-0} = KCACHE_MODE1;
+ let Word1{9-2} = KCACHE_ADDR0;
+ let Word1{17-10} = KCACHE_ADDR1;
+ let Word1{24-18} = COUNT;
+ let Word1{25} = ALT_CONST;
+ let Word1{29-26} = CF_INST;
+ let Word1{30} = WHOLE_QUAD_MODE;
+ let Word1{31} = BARRIER;
+}
+
+class CF_ALLOC_EXPORT_WORD0_RAT {
+ field bits<32> Word0;
+
+ bits<4> rat_id;
+ bits<6> rat_inst;
+ bits<2> rim;
+ bits<2> type;
+ bits<7> rw_gpr;
+ bits<1> rw_rel;
+ bits<7> index_gpr;
+ bits<2> elem_size;
+
+ let Word0{3-0} = rat_id;
+ let Word0{9-4} = rat_inst;
+ let Word0{10} = 0; // Reserved
+ let Word0{12-11} = rim;
+ let Word0{14-13} = type;
+ let Word0{21-15} = rw_gpr;
+ let Word0{22} = rw_rel;
+ let Word0{29-23} = index_gpr;
+ let Word0{31-30} = elem_size;
+}
+
+class CF_ALLOC_EXPORT_WORD1_BUF {
+ field bits<32> Word1;
+
+ bits<12> array_size;
+ bits<4> comp_mask;
+ bits<4> burst_count;
+ bits<1> vpm;
+ bits<1> eop;
+ bits<8> cf_inst;
+ bits<1> mark;
+ bits<1> barrier;
+
+ let Word1{11-0} = array_size;
+ let Word1{15-12} = comp_mask;
+ let Word1{19-16} = burst_count;
+ let Word1{20} = vpm;
+ let Word1{21} = eop;
+ let Word1{29-22} = cf_inst;
+ let Word1{30} = mark;
+ let Word1{31} = barrier;
+}
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index 37150c4..c0827fc 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -19,18 +19,18 @@
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"
using namespace llvm;
R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
: AMDGPUInstrInfo(tm),
- RI(tm, *this),
+ RI(tm),
ST(tm.getSubtarget<AMDGPUSubtarget>())
{ }
@@ -51,9 +51,17 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
- if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
- && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
- for (unsigned I = 0; I < 4; I++) {
+ unsigned VectorComponents = 0;
+ if (AMDGPU::R600_Reg128RegClass.contains(DestReg) &&
+ AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
+ VectorComponents = 4;
+  } else if (AMDGPU::R600_Reg64RegClass.contains(DestReg) &&
+ AMDGPU::R600_Reg64RegClass.contains(SrcReg)) {
+ VectorComponents = 2;
+ }
+
+ if (VectorComponents > 0) {
+ for (unsigned I = 0; I < VectorComponents; I++) {
unsigned SubRegIndex = RI.getSubRegFromChannel(I);
buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
RI.getSubReg(DestReg, SubRegIndex),
@@ -62,28 +70,23 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
RegState::Define | RegState::Implicit);
}
} else {
-
- // We can't copy vec4 registers
- assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
- && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));
-
MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
DestReg, SrcReg);
- NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
+ NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
.setIsKill(KillSrc);
}
}
-MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
- unsigned DstReg, int64_t Imm) const {
- MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
- MachineInstrBuilder MIB(*MF, MI);
- MIB.addReg(DstReg, RegState::Define);
- MIB.addReg(AMDGPU::ALU_LITERAL_X);
- MIB.addImm(Imm);
- MIB.addReg(0); // PREDICATE_BIT
-
- return MI;
+/// \returns true if \p MBBI can be moved into a new basic block.
+bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
+ E = MBBI->operands_end(); I != E; ++I) {
+ if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
+ I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
+ return false;
+ }
+ return true;
}
unsigned R600InstrInfo::getIEQOpcode() const {
@@ -114,12 +117,7 @@ bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
}
bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
- switch(Opcode) {
- default: return false;
- case AMDGPU::DOT4_r600_pseudo:
- case AMDGPU::DOT4_eg_pseudo:
- return true;
- }
+ return false;
}
bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
@@ -136,19 +134,73 @@ bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
unsigned TargetFlags = get(Opcode).TSFlags;
+ return (TargetFlags & R600_InstFlag::ALU_INST);
+}
+
+bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
+ unsigned TargetFlags = get(Opcode).TSFlags;
+
return ((TargetFlags & R600_InstFlag::OP1) |
(TargetFlags & R600_InstFlag::OP2) |
(TargetFlags & R600_InstFlag::OP3));
}
+bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
+ unsigned TargetFlags = get(Opcode).TSFlags;
+
+ return ((TargetFlags & R600_InstFlag::LDS_1A) |
+ (TargetFlags & R600_InstFlag::LDS_1A1D) |
+ (TargetFlags & R600_InstFlag::LDS_1A2D));
+}
+
+bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
+ return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
+}
+
+bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
+ return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
+}
+
+bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
+ if (isALUInstr(MI->getOpcode()))
+ return true;
+ if (isVector(*MI) || isCubeOp(MI->getOpcode()))
+ return true;
+ switch (MI->getOpcode()) {
+ case AMDGPU::PRED_X:
+ case AMDGPU::INTERP_PAIR_XY:
+ case AMDGPU::INTERP_PAIR_ZW:
+ case AMDGPU::INTERP_VEC_LOAD:
+ case AMDGPU::COPY:
+ case AMDGPU::DOT_4:
+ return true;
+ default:
+ return false;
+ }
+}
+
bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
- return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
+ if (ST.hasCaymanISA())
+ return false;
+ return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}
bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
return isTransOnly(MI->getOpcode());
}
+bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
+ return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
+}
+
+bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
+ return isVectorOnly(MI->getOpcode());
+}
+
+bool R600InstrInfo::isExport(unsigned Opcode) const {
+ return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
+}
+
bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
return ST.hasVertexCache() && IS_VTX(get(Opcode));
}
@@ -168,6 +220,380 @@ bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
usesTextureCache(MI->getOpcode());
}
+bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
+ switch (Opcode) {
+ case AMDGPU::KILLGT:
+ case AMDGPU::GROUP_BARRIER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
+ return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
+}
+
+bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
+ return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
+}
+
+bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
+ if (!isALUInstr(MI->getOpcode())) {
+ return false;
+ }
+ for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
+ E = MI->operands_end(); I != E; ++I) {
+ if (!I->isReg() || !I->isUse() ||
+ TargetRegisterInfo::isVirtualRegister(I->getReg()))
+ continue;
+
+ if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
+ return true;
+ }
+ return false;
+}
+
+int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
+ static const unsigned OpTable[] = {
+ AMDGPU::OpName::src0,
+ AMDGPU::OpName::src1,
+ AMDGPU::OpName::src2
+ };
+
+  assert(SrcNum < 3);
+ return getOperandIdx(Opcode, OpTable[SrcNum]);
+}
+
+#define SRC_SEL_ROWS 11
+int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
+ static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
+ {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
+ {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
+ {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
+ {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
+ {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
+ {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
+ {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
+ {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
+ {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
+ {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
+ {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
+ };
+
+ for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
+ if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
+ return getOperandIdx(Opcode, SrcSelTable[i][1]);
+ }
+ }
+ return -1;
+}
+#undef SRC_SEL_ROWS
+
+SmallVector<std::pair<MachineOperand *, int64_t>, 3>
+R600InstrInfo::getSrcs(MachineInstr *MI) const {
+ SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
+
+ if (MI->getOpcode() == AMDGPU::DOT_4) {
+ static const unsigned OpTable[8][2] = {
+ {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
+ {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
+ {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
+ {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
+ {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
+ {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
+ {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
+ {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
+ };
+
+ for (unsigned j = 0; j < 8; j++) {
+ MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
+ OpTable[j][0]));
+ unsigned Reg = MO.getReg();
+ if (Reg == AMDGPU::ALU_CONST) {
+ unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
+ OpTable[j][1])).getImm();
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
+ continue;
+ }
+
+ }
+ return Result;
+ }
+
+ static const unsigned OpTable[3][2] = {
+ {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
+ {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
+ {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
+ };
+
+ for (unsigned j = 0; j < 3; j++) {
+ int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
+ if (SrcIdx < 0)
+ break;
+ MachineOperand &MO = MI->getOperand(SrcIdx);
+ unsigned Reg = MI->getOperand(SrcIdx).getReg();
+ if (Reg == AMDGPU::ALU_CONST) {
+ unsigned Sel = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
+ continue;
+ }
+ if (Reg == AMDGPU::ALU_LITERAL_X) {
+ unsigned Imm = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
+ continue;
+ }
+ Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
+ }
+ return Result;
+}
+
+std::vector<std::pair<int, unsigned> >
+R600InstrInfo::ExtractSrcs(MachineInstr *MI,
+ const DenseMap<unsigned, unsigned> &PV,
+ unsigned &ConstCount) const {
+ ConstCount = 0;
+ const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
+ const std::pair<int, unsigned> DummyPair(-1, 0);
+ std::vector<std::pair<int, unsigned> > Result;
+ unsigned i = 0;
+ for (unsigned n = Srcs.size(); i < n; ++i) {
+ unsigned Reg = Srcs[i].first->getReg();
+ unsigned Index = RI.getEncodingValue(Reg) & 0xff;
+ if (Reg == AMDGPU::OQAP) {
+ Result.push_back(std::pair<int, unsigned>(Index, 0));
+ }
+ if (PV.find(Reg) != PV.end()) {
+      // 255 is used to tell it's a PS/PV reg
+ Result.push_back(std::pair<int, unsigned>(255, 0));
+ continue;
+ }
+ if (Index > 127) {
+ ConstCount++;
+ Result.push_back(DummyPair);
+ continue;
+ }
+ unsigned Chan = RI.getHWRegChan(Reg);
+ Result.push_back(std::pair<int, unsigned>(Index, Chan));
+ }
+ for (; i < 3; ++i)
+ Result.push_back(DummyPair);
+ return Result;
+}
+
+static std::vector<std::pair<int, unsigned> >
+Swizzle(std::vector<std::pair<int, unsigned> > Src,
+ R600InstrInfo::BankSwizzle Swz) {
+ if (Src[0] == Src[1])
+ Src[1].first = -1;
+ switch (Swz) {
+ case R600InstrInfo::ALU_VEC_012_SCL_210:
+ break;
+ case R600InstrInfo::ALU_VEC_021_SCL_122:
+ std::swap(Src[1], Src[2]);
+ break;
+ case R600InstrInfo::ALU_VEC_102_SCL_221:
+ std::swap(Src[0], Src[1]);
+ break;
+ case R600InstrInfo::ALU_VEC_120_SCL_212:
+ std::swap(Src[0], Src[1]);
+ std::swap(Src[0], Src[2]);
+ break;
+ case R600InstrInfo::ALU_VEC_201:
+ std::swap(Src[0], Src[2]);
+ std::swap(Src[0], Src[1]);
+ break;
+ case R600InstrInfo::ALU_VEC_210:
+ std::swap(Src[0], Src[2]);
+ break;
+ }
+ return Src;
+}
+
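+// Note on the naming (inferred from the tables here and in getTransSwizzle):
+// e.g. ALU_VEC_021_SCL_122 reads the vector sources in the order 0,2,1, while
+// the trans (scalar) slot reads its three sources at cycles 1,2,2.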
+static unsigned
+getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
+ switch (Swz) {
+ case R600InstrInfo::ALU_VEC_012_SCL_210: {
+ unsigned Cycles[3] = { 2, 1, 0};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_021_SCL_122: {
+ unsigned Cycles[3] = { 1, 2, 2};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_120_SCL_212: {
+ unsigned Cycles[3] = { 2, 1, 2};
+ return Cycles[Op];
+ }
+ case R600InstrInfo::ALU_VEC_102_SCL_221: {
+ unsigned Cycles[3] = { 2, 2, 1};
+ return Cycles[Op];
+ }
+ default:
+ llvm_unreachable("Wrong Swizzle for Trans Slot");
+ return 0;
+ }
+}
+
+/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
+/// in the same Instruction Group while meeting read port limitations given a
+/// Swz swizzle sequence.
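+/// (Vector[chan][cycle] below models the read ports: each channel can supply
+/// at most one distinct register index per read cycle, so a second, different
+/// index in an occupied slot is a conflict.)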
+unsigned R600InstrInfo::isLegalUpTo(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ const std::vector<R600InstrInfo::BankSwizzle> &Swz,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const {
+ int Vector[4][3];
+ memset(Vector, -1, sizeof(Vector));
+ for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
+ const std::vector<std::pair<int, unsigned> > &Srcs =
+ Swizzle(IGSrcs[i], Swz[i]);
+ for (unsigned j = 0; j < 3; j++) {
+ const std::pair<int, unsigned> &Src = Srcs[j];
+ if (Src.first < 0 || Src.first == 255)
+ continue;
+ if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
+ if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
+ Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
+ // The value from output queue A (denoted by register OQAP) can
+ // only be fetched during the first cycle.
+ return false;
+ }
+ // OQAP does not count towards the normal read port restrictions
+ continue;
+ }
+ if (Vector[Src.second][j] < 0)
+ Vector[Src.second][j] = Src.first;
+ if (Vector[Src.second][j] != Src.first)
+ return i;
+ }
+ }
+ // Now check Trans Alu
+ for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
+ const std::pair<int, unsigned> &Src = TransSrcs[i];
+ unsigned Cycle = getTransSwizzle(TransSwz, i);
+ if (Src.first < 0)
+ continue;
+ if (Src.first == 255)
+ continue;
+ if (Vector[Src.second][Cycle] < 0)
+ Vector[Src.second][Cycle] = Src.first;
+ if (Vector[Src.second][Cycle] != Src.first)
+ return IGSrcs.size() - 1;
+ }
+ return IGSrcs.size();
+}
+
+/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
+/// (in lexicographic terms) swizzle sequence, assuming that all swizzles after
+/// Idx can be skipped.
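+/// (This behaves like incrementing an odometer: the last position not already
+/// at the maximal swizzle is bumped to its successor, and every later position
+/// resets to the first swizzle value.)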
+static bool
+NextPossibleSolution(
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ unsigned Idx) {
+ assert(Idx < SwzCandidate.size());
+ int ResetIdx = Idx;
+ while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
+    ResetIdx--;
+ for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
+ SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
+ }
+ if (ResetIdx == -1)
+ return false;
+ int NextSwizzle = SwzCandidate[ResetIdx] + 1;
+ SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
+ return true;
+}
+
+/// Enumerate all possible swizzle sequences to find one that meets all
+/// read port requirements.
+bool R600InstrInfo::FindSwizzleForVectorSlot(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const {
+ unsigned ValidUpTo = 0;
+ do {
+ ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
+ if (ValidUpTo == IGSrcs.size())
+ return true;
+ } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
+ return false;
+}
+
+/// Instructions in the Trans slot can't read a GPR at cycle 0 if they also
+/// read a constant, and can't read a GPR at cycle 1 if they read 2 constants.
+static bool
+isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
+ const std::vector<std::pair<int, unsigned> > &TransOps,
+ unsigned ConstCount) {
+ // TransALU can't read 3 constants
+ if (ConstCount > 2)
+ return false;
+ for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
+ const std::pair<int, unsigned> &Src = TransOps[i];
+ unsigned Cycle = getTransSwizzle(TransSwz, i);
+ if (Src.first < 0)
+ continue;
+ if (ConstCount > 0 && Cycle == 0)
+ return false;
+ if (ConstCount > 1 && Cycle == 1)
+ return false;
+ }
+ return true;
+}
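
Concretely, constants occupy the early fetch cycles: with one constant in flight the GPR read must avoid cycle 0, and with two constants only cycle 2 remains free. A minimal sketch of that rule (hypothetical helper):

// Cycle is the trans swizzle's fetch cycle for a GPR source operand;
// ConstCount is the number of constants the instruction reads (at most 2).
static bool gprFetchCycleOk(unsigned Cycle, unsigned ConstCount) {
  if (ConstCount > 0 && Cycle == 0)
    return false; // a constant already occupies cycle 0
  if (ConstCount > 1 && Cycle == 1)
    return false; // two constants occupy cycles 0 and 1
  return true;
}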
+
+bool
+R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
+ const DenseMap<unsigned, unsigned> &PV,
+ std::vector<BankSwizzle> &ValidSwizzle,
+ bool isLastAluTrans)
+ const {
+  // TODO: support shared src0 - src1 operands.
+
+ std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
+ ValidSwizzle.clear();
+ unsigned ConstCount;
+ BankSwizzle TransBS = ALU_VEC_012_SCL_210;
+ for (unsigned i = 0, e = IG.size(); i < e; ++i) {
+ IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
+ unsigned Op = getOperandIdx(IG[i]->getOpcode(),
+ AMDGPU::OpName::bank_swizzle);
+ ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
+ IG[i]->getOperand(Op).getImm());
+ }
+ std::vector<std::pair<int, unsigned> > TransOps;
+ if (!isLastAluTrans)
+ return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);
+
+ TransOps = IGSrcs.back();
+ IGSrcs.pop_back();
+ ValidSwizzle.pop_back();
+
+ static const R600InstrInfo::BankSwizzle TransSwz[] = {
+ ALU_VEC_012_SCL_210,
+ ALU_VEC_021_SCL_122,
+ ALU_VEC_120_SCL_212,
+ ALU_VEC_102_SCL_221
+ };
+ for (unsigned i = 0; i < 4; i++) {
+ TransBS = TransSwz[i];
+ if (!isConstCompatible(TransBS, TransOps, ConstCount))
+ continue;
+ bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
+ TransBS);
+ if (Result) {
+ ValidSwizzle.push_back(TransBS);
+ return true;
+ }
+ }
+
+ return false;
+}
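
A usage sketch from a hypothetical caller (IG, PVMap and TII are placeholder names): on success, the out-parameter carries one bank_swizzle per packed instruction, with the trans instruction's swizzle last when isLastAluTrans is set.

std::vector<R600InstrInfo::BankSwizzle> BS;
if (TII->fitsReadPortLimitations(IG, PVMap, BS, /*isLastAluTrans=*/true)) {
  // BS[i] is the bank_swizzle to write back into IG[i]; BS.back() applies
  // to the instruction that will execute on the Trans ALU.
}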
+
+
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
const {
@@ -194,37 +620,31 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
}
bool
-R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
+R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
+ const {
std::vector<unsigned> Consts;
+ SmallSet<int64_t, 4> Literals;
for (unsigned i = 0, n = MIs.size(); i < n; i++) {
- const MachineInstr *MI = MIs[i];
-
- const R600Operands::Ops OpTable[3][2] = {
- {R600Operands::SRC0, R600Operands::SRC0_SEL},
- {R600Operands::SRC1, R600Operands::SRC1_SEL},
- {R600Operands::SRC2, R600Operands::SRC2_SEL},
- };
-
+ MachineInstr *MI = MIs[i];
if (!isALUInstr(MI->getOpcode()))
continue;
- for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
- if (SrcIdx < 0)
- break;
- unsigned Reg = MI->getOperand(SrcIdx).getReg();
- if (Reg == AMDGPU::ALU_CONST) {
- unsigned Const = MI->getOperand(
- getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
- Consts.push_back(Const);
- continue;
- }
- if (AMDGPU::R600_KC0RegClass.contains(Reg) ||
- AMDGPU::R600_KC1RegClass.contains(Reg)) {
- unsigned Index = RI.getEncodingValue(Reg) & 0xff;
- unsigned Chan = RI.getHWRegChan(Reg);
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
+ getSrcs(MI);
+
+ for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
+ std::pair<MachineOperand *, unsigned> Src = Srcs[j];
+ if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
+ Literals.insert(Src.second);
+ if (Literals.size() > 4)
+ return false;
+ if (Src.first->getReg() == AMDGPU::ALU_CONST)
+ Consts.push_back(Src.second);
+ if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
+ AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
+ unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
+ unsigned Chan = RI.getHWRegChan(Src.first->getReg());
Consts.push_back((Index << 2) | Chan);
- continue;
}
}
}
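
Two limits are enforced by the loop above: an instruction group may use at most four distinct literal values, and each kcache constant is keyed by (Index << 2) | Chan, so two constants collide only when both the kcache index and the channel match. A sketch of the key (hypothetical helper):

// Packs a kcache constant into the key used for the conflict check: the
// channel occupies the low two bits, the register index the bits above.
static unsigned kcacheConstKey(unsigned Index, unsigned Chan) {
  return (Index << 2) | Chan;
}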
@@ -265,6 +685,11 @@ bool isJump(unsigned Opcode) {
return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}
+static bool isBranch(unsigned Opcode) {
+ return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
+ Opcode == AMDGPU::BRANCH_COND_f32;
+}
+
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
@@ -283,6 +708,10 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return false;
--I;
}
+  // AMDGPU::BRANCH* instructions are only available after isel and are not
+  // handled here.
+ if (isBranch(I->getOpcode()))
+ return true;
if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
return false;
}
@@ -343,6 +772,17 @@ int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
};
}
+static
+MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
+ for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
+ It != E; ++It) {
+ if (It->getOpcode() == AMDGPU::CF_ALU ||
+ It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
+ return llvm::prior(It.base());
+ }
+ return MBB.end();
+}
+
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
@@ -364,6 +804,11 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
.addMBB(TBB)
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ return 1;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
return 1;
}
} else {
@@ -375,6 +820,11 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
.addMBB(TBB)
.addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ return 2;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
return 2;
}
}
@@ -398,6 +848,11 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ break;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU));
break;
}
case AMDGPU::JUMP:
@@ -418,6 +873,11 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
clearFlag(predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
+ MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
+ if (CfAlu == MBB.end())
+ break;
+ assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
+ CfAlu->setDesc(get(AMDGPU::CF_ALU));
break;
}
case AMDGPU::JUMP:
@@ -452,6 +912,15 @@ R600InstrInfo::isPredicable(MachineInstr *MI) const {
if (MI->getOpcode() == AMDGPU::KILLGT) {
return false;
+ } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
+    // If the clause starts in the middle of the MBB then the MBB contains
+    // more than one clause; we are unable to predicate several clauses.
+ if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
+ return false;
+    // TODO: We don't support KCache merging yet.
+ if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
+ return false;
+ return true;
} else if (isVector(*MI)) {
return false;
} else {
@@ -547,6 +1016,25 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
int PIdx = MI->findFirstPredOperandIdx();
+ if (MI->getOpcode() == AMDGPU::CF_ALU) {
+ MI->getOperand(8).setImm(0);
+ return true;
+ }
+
+ if (MI->getOpcode() == AMDGPU::DOT_4) {
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
+ .setReg(Pred[2].getReg());
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
+ .setReg(Pred[2].getReg());
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
+ .setReg(Pred[2].getReg());
+ MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
+ .setReg(Pred[2].getReg());
+ MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
+ MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
+ return true;
+ }
+
if (PIdx != -1) {
MachineOperand &PMO = MI->getOperand(PIdx);
PMO.setReg(Pred[2].getReg());
@@ -558,6 +1046,10 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
return false;
}
+unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
+ return 2;
+}
+
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
@@ -566,67 +1058,25 @@ unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return 2;
}
-int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- int Offset = 0;
-
- if (MFI->getNumObjects() == 0) {
- return -1;
- }
-
- if (MRI.livein_empty()) {
- return 0;
- }
-
- for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
- LE = MRI.livein_end();
- LI != LE; ++LI) {
- Offset = std::max(Offset,
- GET_REG_INDEX(RI.getEncodingValue(LI->first)));
- }
-
- return Offset + 1;
-}
-
-int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
- int Offset = 0;
- const MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Variable sized objects are not supported
- assert(!MFI->hasVarSizedObjects());
-
- if (MFI->getNumObjects() == 0) {
- return -1;
- }
-
- Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);
-
- return getIndirectIndexBegin(MF) + Offset;
-}
-
-std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
+void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const {
const AMDGPUFrameLowering *TFL =
static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
- std::vector<unsigned> Regs;
unsigned StackWidth = TFL->getStackWidth(MF);
int End = getIndirectIndexEnd(MF);
- if (End == -1) {
- return Regs;
- }
+ if (End == -1)
+ return;
for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
- Regs.push_back(SuperReg);
+ Reserved.set(SuperReg);
for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
- Regs.push_back(Reg);
+ Reserved.set(Reg);
}
}
- return Regs;
}
unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
@@ -636,13 +1086,8 @@ unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
return RegIndex;
}
-const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
- unsigned SourceReg) const {
- return &AMDGPU::R600_TReg32RegClass;
-}
-
-const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
- return &AMDGPU::TRegMemRegClass;
+const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
+ return &AMDGPU::R600_TReg32_XRegClass;
}
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
@@ -652,12 +1097,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X, OffsetReg);
- setImmOperand(MOVA, R600Operands::WRITE, 0);
+ setImmOperand(MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
AddrReg, ValueReg)
- .addReg(AMDGPU::AR_X, RegState::Implicit);
- setImmOperand(Mov, R600Operands::DST_REL, 1);
+ .addReg(AMDGPU::AR_X,
+ RegState::Implicit | RegState::Kill);
+ setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
return Mov;
}
@@ -669,20 +1115,17 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X,
OffsetReg);
- setImmOperand(MOVA, R600Operands::WRITE, 0);
+ setImmOperand(MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
ValueReg,
AddrReg)
- .addReg(AMDGPU::AR_X, RegState::Implicit);
- setImmOperand(Mov, R600Operands::SRC0_REL, 1);
+ .addReg(AMDGPU::AR_X,
+ RegState::Implicit | RegState::Kill);
+ setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
return Mov;
}
-const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
- return &AMDGPU::IndirectRegRegClass;
-}
-
unsigned R600InstrInfo::getMaxAlusPerClause() const {
return 115;
}
@@ -728,52 +1171,118 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
return MIB;
}
+#define OPERAND_CASE(Label) \
+ case Label: { \
+ static const unsigned Ops[] = \
+ { \
+ Label##_X, \
+ Label##_Y, \
+ Label##_Z, \
+ Label##_W \
+ }; \
+ return Ops[Slot]; \
+ }
+
+static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
+ switch (Op) {
+ OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
+ OPERAND_CASE(AMDGPU::OpName::update_pred)
+ OPERAND_CASE(AMDGPU::OpName::write)
+ OPERAND_CASE(AMDGPU::OpName::omod)
+ OPERAND_CASE(AMDGPU::OpName::dst_rel)
+ OPERAND_CASE(AMDGPU::OpName::clamp)
+ OPERAND_CASE(AMDGPU::OpName::src0)
+ OPERAND_CASE(AMDGPU::OpName::src0_neg)
+ OPERAND_CASE(AMDGPU::OpName::src0_rel)
+ OPERAND_CASE(AMDGPU::OpName::src0_abs)
+ OPERAND_CASE(AMDGPU::OpName::src0_sel)
+ OPERAND_CASE(AMDGPU::OpName::src1)
+ OPERAND_CASE(AMDGPU::OpName::src1_neg)
+ OPERAND_CASE(AMDGPU::OpName::src1_rel)
+ OPERAND_CASE(AMDGPU::OpName::src1_abs)
+ OPERAND_CASE(AMDGPU::OpName::src1_sel)
+ OPERAND_CASE(AMDGPU::OpName::pred_sel)
+ default:
+ llvm_unreachable("Wrong Operand");
+ }
+}
+
+#undef OPERAND_CASE
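
Usage sketch: getSlotedOps resolves a generic operand name to its per-slot variant, so slot 2 (the Z slot) of src0 maps to AMDGPU::OpName::src0_Z, a name formed by the OPERAND_CASE macro above.

unsigned OpZ = getSlotedOps(AMDGPU::OpName::src0, 2); // AMDGPU::OpName::src0_Z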
+
+MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
+ MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
+ const {
+ assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
+ unsigned Opcode;
+ const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+ if (ST.getGeneration() <= AMDGPUSubtarget::R700)
+ Opcode = AMDGPU::DOT4_r600;
+ else
+ Opcode = AMDGPU::DOT4_eg;
+ MachineBasicBlock::iterator I = MI;
+ MachineOperand &Src0 = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
+ MachineOperand &Src1 = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
+ MachineInstr *MIB = buildDefaultInstruction(
+ MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
+ static const unsigned Operands[14] = {
+ AMDGPU::OpName::update_exec_mask,
+ AMDGPU::OpName::update_pred,
+ AMDGPU::OpName::write,
+ AMDGPU::OpName::omod,
+ AMDGPU::OpName::dst_rel,
+ AMDGPU::OpName::clamp,
+ AMDGPU::OpName::src0_neg,
+ AMDGPU::OpName::src0_rel,
+ AMDGPU::OpName::src0_abs,
+ AMDGPU::OpName::src0_sel,
+ AMDGPU::OpName::src1_neg,
+ AMDGPU::OpName::src1_rel,
+ AMDGPU::OpName::src1_abs,
+ AMDGPU::OpName::src1_sel,
+ };
+
+ MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
+ getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
+ MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
+ .setReg(MO.getReg());
+
+ for (unsigned i = 0; i < 14; i++) {
+ MachineOperand &MO = MI->getOperand(
+ getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
+ assert (MO.isImm());
+ setImmOperand(MIB, Operands[i], MO.getImm());
+ }
+ MIB->getOperand(20).setImm(0);
+ return MIB;
+}
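
A hypothetical expansion sketch: a DOT_4 pseudo is split into four per-slot DOT4 instructions, one per channel, each pulling its _X/_Y/_Z/_W operand variants (SlotDst stands in for the per-channel destination registers):

for (unsigned Slot = 0; Slot < 4; ++Slot)
  TII->buildSlotOfVectorInstruction(MBB, DotMI, Slot, SlotDst[Slot]);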
+
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
unsigned DstReg,
uint64_t Imm) const {
MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
AMDGPU::ALU_LITERAL_X);
- setImmOperand(MovImm, R600Operands::IMM, Imm);
+ setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
return MovImm;
}
-int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
- R600Operands::Ops Op) const {
- return getOperandIdx(MI.getOpcode(), Op);
+MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const {
+ return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}
-int R600InstrInfo::getOperandIdx(unsigned Opcode,
- R600Operands::Ops Op) const {
- unsigned TargetFlags = get(Opcode).TSFlags;
- unsigned OpTableIdx;
-
- if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
- switch (Op) {
- case R600Operands::DST: return 0;
- case R600Operands::SRC0: return 1;
- case R600Operands::SRC1: return 2;
- case R600Operands::SRC2: return 3;
- default:
- assert(!"Unknown operand type for instruction");
- return -1;
- }
- }
-
- if (TargetFlags & R600_InstFlag::OP1) {
- OpTableIdx = 0;
- } else if (TargetFlags & R600_InstFlag::OP2) {
- OpTableIdx = 1;
- } else {
- assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
- "for this instruction");
- OpTableIdx = 2;
- }
+int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
+ return getOperandIdx(MI.getOpcode(), Op);
+}
- return R600Operands::ALUOpTable[OpTableIdx][Op];
+int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
+ return AMDGPU::getNamedOperandIdx(Opcode, Op);
}
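
getOperandIdx now forwards to the TableGen-generated named operand table, so callers look operands up by name instead of via hand-maintained index arrays. Usage sketch:

int SrcIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::src0);
if (SrcIdx != -1) {
  MachineOperand &Src0 = MI->getOperand(SrcIdx);
  // ... read or rewrite the named operand ...
}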
-void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
+void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
int64_t Imm) const {
int Idx = getOperandIdx(*MI, Op);
assert(Idx != -1 && "Operand not supported for this instruction.");
@@ -801,20 +1310,20 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
switch (Flag) {
case MO_FLAG_CLAMP:
- FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
+ FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
break;
case MO_FLAG_MASK:
- FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
+ FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
break;
case MO_FLAG_NOT_LAST:
case MO_FLAG_LAST:
- FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
+ FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
break;
case MO_FLAG_NEG:
switch (SrcIdx) {
- case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
- case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
- case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
+ case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
+ case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
+ case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
}
break;
@@ -823,8 +1332,8 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
"instructions.");
(void)IsOP3;
switch (SrcIdx) {
- case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
- case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
+ case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
+ case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
}
break;
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index babe4b8..13d9810 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -16,7 +16,6 @@
#define R600INSTRUCTIONINFO_H_
#include "AMDGPUInstrInfo.h"
-#include "AMDIL.h"
#include "R600Defines.h"
#include "R600RegisterInfo.h"
#include <map>
@@ -36,8 +35,19 @@ namespace llvm {
const AMDGPUSubtarget &ST;
int getBranchInstr(const MachineOperand &op) const;
+  std::vector<std::pair<int, unsigned> >
+  ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV,
+              unsigned &ConstCount) const;
public:
+ enum BankSwizzle {
+ ALU_VEC_012_SCL_210 = 0,
+ ALU_VEC_021_SCL_122,
+ ALU_VEC_120_SCL_212,
+ ALU_VEC_102_SCL_221,
+ ALU_VEC_201,
+ ALU_VEC_210
+ };
+
explicit R600InstrInfo(AMDGPUTargetMachine &tm);
const R600RegisterInfo &getRegisterInfo() const;
@@ -45,6 +55,8 @@ namespace llvm {
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
+ bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const;
bool isTrig(const MachineInstr &MI) const;
bool isPlaceHolderOpcode(unsigned opcode) const;
@@ -53,25 +65,83 @@ namespace llvm {
/// \returns true if this \p Opcode represents an ALU instruction.
bool isALUInstr(unsigned Opcode) const;
+ bool hasInstrModifiers(unsigned Opcode) const;
+ bool isLDSInstr(unsigned Opcode) const;
+ bool isLDSNoRetInstr(unsigned Opcode) const;
+ bool isLDSRetInstr(unsigned Opcode) const;
+
+  /// \returns true if \p MI represents an ALU instruction or an
+  /// instruction that will be lowered in the ExpandSpecialInstrs pass.
+ bool canBeConsideredALU(const MachineInstr *MI) const;
bool isTransOnly(unsigned Opcode) const;
bool isTransOnly(const MachineInstr *MI) const;
+ bool isVectorOnly(unsigned Opcode) const;
+ bool isVectorOnly(const MachineInstr *MI) const;
+ bool isExport(unsigned Opcode) const;
bool usesVertexCache(unsigned Opcode) const;
bool usesVertexCache(const MachineInstr *MI) const;
bool usesTextureCache(unsigned Opcode) const;
bool usesTextureCache(const MachineInstr *MI) const;
+ bool mustBeLastInClause(unsigned Opcode) const;
+ bool usesAddressRegister(MachineInstr *MI) const;
+ bool definesAddressRegister(MachineInstr *MI) const;
+ bool readsLDSSrcReg(const MachineInstr *MI) const;
+
+ /// \returns The operand index for the given source number. Legal values
+ /// for SrcNum are 0, 1, and 2.
+ int getSrcIdx(unsigned Opcode, unsigned SrcNum) const;
+  /// \returns The operand index for the Sel operand given an index to one
+  /// of the instruction's src operands.
+ int getSelIdx(unsigned Opcode, unsigned SrcIdx) const;
+
+  /// \returns a pair for each src of an ALU instruction.
+ /// The first member of a pair is the register id.
+ /// If register is ALU_CONST, second member is SEL.
+ /// If register is ALU_LITERAL, second member is IMM.
+ /// Otherwise, second member value is undefined.
+ SmallVector<std::pair<MachineOperand *, int64_t>, 3>
+ getSrcs(MachineInstr *MI) const;
+
+ unsigned isLegalUpTo(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ const std::vector<R600InstrInfo::BankSwizzle> &Swz,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const;
+
+ bool FindSwizzleForVectorSlot(
+ const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
+ std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
+ const std::vector<std::pair<int, unsigned> > &TransSrcs,
+ R600InstrInfo::BankSwizzle TransSwz) const;
+
+  /// Given the order VEC_012 < VEC_021 < VEC_120 < VEC_102 < VEC_201 < VEC_210,
+  /// returns true and stores in BS the first (in lexical order) BankSwizzle
+  /// assignment, starting from the one already provided in the Instruction
+  /// Group MIs, that fits the read port limitations. Otherwise returns false
+  /// and leaves the content of BS undefined.
+  /// isLastAluTrans should be set if the last ALU of MIs will be executed on
+  /// the Trans ALU. In this case, the last entry of BS holds the BankSwizzle
+  /// value to apply to that instruction.
+  /// PV maps GPRs to the PV registers of the Instruction Group MIs.
+ bool fitsReadPortLimitations(const std::vector<MachineInstr *> &MIs,
+ const DenseMap<unsigned, unsigned> &PV,
+ std::vector<BankSwizzle> &BS,
+ bool isLastAluTrans) const;
+
+  /// An instruction group can only access 2 channel pairs (either [XY] or
+  /// [ZW]) from a KCache bank on R700+. This function checks whether the MI
+  /// set given as input meets this limitation.
+ bool fitsConstReadLimitations(const std::vector<MachineInstr *> &) const;
+  /// Same, but using a set of constant indices instead of a set of MIs.
bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
- bool canBundle(const std::vector<MachineInstr *> &) const;
   /// \brief Vector instructions are instructions that must fill all
/// instruction slots within an instruction group.
bool isVector(const MachineInstr &MI) const;
- virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const;
-
virtual unsigned getIEQOpcode() const;
virtual bool isMov(unsigned Opcode) const;
@@ -118,6 +188,8 @@ namespace llvm {
bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
+ unsigned int getPredicationCost(const MachineInstr *) const;
+
unsigned int getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = 0) const;
@@ -125,22 +197,14 @@ namespace llvm {
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const { return 1;}
- /// \returns a list of all the registers that may be accesed using indirect
- /// addressing.
- std::vector<unsigned> getIndirectReservedRegs(const MachineFunction &MF) const;
-
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
-
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
-
+  /// \brief Reserve the registers that may be accessed using indirect
+  /// addressing.
+ void reserveIndirectRegisters(BitVector &Reserved,
+ const MachineFunction &MF) const;
virtual unsigned calculateIndirectAddress(unsigned RegIndex,
unsigned Channel) const;
- virtual const TargetRegisterClass *getIndirectAddrStoreRegClass(
- unsigned SourceReg) const;
-
- virtual const TargetRegisterClass *getIndirectAddrLoadRegClass() const;
+ virtual const TargetRegisterClass *getIndirectAddrRegClass() const;
virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
@@ -152,8 +216,6 @@ namespace llvm {
unsigned ValueReg, unsigned Address,
unsigned OffsetReg) const;
- virtual const TargetRegisterClass *getSuperIndirectRegClass() const;
-
unsigned getMaxAlusPerClause() const;
///buildDefaultInstruction - This function returns a MachineInstr with
@@ -170,23 +232,32 @@ namespace llvm {
unsigned Src0Reg,
unsigned Src1Reg = 0) const;
+ MachineInstr *buildSlotOfVectorInstruction(MachineBasicBlock &MBB,
+ MachineInstr *MI,
+ unsigned Slot,
+ unsigned DstReg) const;
+
MachineInstr *buildMovImm(MachineBasicBlock &BB,
MachineBasicBlock::iterator I,
unsigned DstReg,
uint64_t Imm) const;
+ MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const;
+
/// \brief Get the index of Op in the MachineInstr.
///
/// \returns -1 if the Instruction does not contain the specified \p Op.
- int getOperandIdx(const MachineInstr &MI, R600Operands::Ops Op) const;
+ int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
/// \brief Get the index of \p Op for the given Opcode.
///
/// \returns -1 if the Instruction does not contain the specified \p Op.
- int getOperandIdx(unsigned Opcode, R600Operands::Ops Op) const;
+ int getOperandIdx(unsigned Opcode, unsigned Op) const;
/// \brief Helper function for setting instruction flag values.
- void setImmOperand(MachineInstr *MI, R600Operands::Ops Op, int64_t Imm) const;
+ void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
/// \returns true if this instruction has an operand for storing target flags.
bool hasFlagOperand(const MachineInstr &MI) const;
@@ -208,6 +279,12 @@ namespace llvm {
void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
};
+namespace AMDGPU {
+
+int getLDSNoRetOp(uint16_t Opcode);
+
+} // End namespace AMDGPU
+
} // End llvm namespace
#endif // R600INSTRINFO_H_
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 8f47523..0346e24 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -12,44 +12,7 @@
//===----------------------------------------------------------------------===//
include "R600Intrinsics.td"
-
-class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
- InstrItinClass itin>
- : AMDGPUInst <outs, ins, asm, pattern> {
-
- field bits<64> Inst;
- bit TransOnly = 0;
- bit Trig = 0;
- bit Op3 = 0;
- bit isVector = 0;
- bits<2> FlagOperandIdx = 0;
- bit Op1 = 0;
- bit Op2 = 0;
- bit HasNativeOperands = 0;
- bit VTXInst = 0;
- bit TEXInst = 0;
-
- let Namespace = "AMDGPU";
- let OutOperandList = outs;
- let InOperandList = ins;
- let AsmString = asm;
- let Pattern = pattern;
- let Itinerary = itin;
-
- let TSFlags{0} = TransOnly;
- let TSFlags{4} = Trig;
- let TSFlags{5} = Op3;
-
- // Vector instructions are instructions that must fill all slots in an
- // instruction group
- let TSFlags{6} = isVector;
- let TSFlags{8-7} = FlagOperandIdx;
- let TSFlags{9} = HasNativeOperands;
- let TSFlags{10} = Op1;
- let TSFlags{11} = Op2;
- let TSFlags{12} = VTXInst;
- let TSFlags{13} = TEXInst;
-}
+include "R600InstrFormats.td"
class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
InstR600 <outs, ins, asm, pattern, NullALU> {
@@ -96,6 +59,12 @@ def UP : InstFlag <"printUpdatePred">;
// Once we start using the packetizer in this backend we should have this
// default to 0.
def LAST : InstFlag<"printLast", 1>;
+def RSel : Operand<i32> {
+ let PrintMethod = "printRSel";
+}
+def CT: Operand<i32> {
+ let PrintMethod = "printCT";
+}
def FRAMEri : Operand<iPTR> {
let MIOperandInfo = (ops R600_Reg32:$ptr, i32imm:$index);
@@ -106,237 +75,7 @@ def ADDRDWord : ComplexPattern<i32, 1, "SelectADDRDWord", [], []>;
def ADDRVTX_READ : ComplexPattern<i32, 2, "SelectADDRVTX_READ", [], []>;
def ADDRGA_CONST_OFFSET : ComplexPattern<i32, 1, "SelectGlobalValueConstantOffset", [], []>;
def ADDRGA_VAR_OFFSET : ComplexPattern<i32, 2, "SelectGlobalValueVariableOffset", [], []>;
-def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
-
-class R600ALU_Word0 {
- field bits<32> Word0;
-
- bits<11> src0;
- bits<1> src0_neg;
- bits<1> src0_rel;
- bits<11> src1;
- bits<1> src1_rel;
- bits<1> src1_neg;
- bits<3> index_mode = 0;
- bits<2> pred_sel;
- bits<1> last;
-
- bits<9> src0_sel = src0{8-0};
- bits<2> src0_chan = src0{10-9};
- bits<9> src1_sel = src1{8-0};
- bits<2> src1_chan = src1{10-9};
-
- let Word0{8-0} = src0_sel;
- let Word0{9} = src0_rel;
- let Word0{11-10} = src0_chan;
- let Word0{12} = src0_neg;
- let Word0{21-13} = src1_sel;
- let Word0{22} = src1_rel;
- let Word0{24-23} = src1_chan;
- let Word0{25} = src1_neg;
- let Word0{28-26} = index_mode;
- let Word0{30-29} = pred_sel;
- let Word0{31} = last;
-}
-
-class R600ALU_Word1 {
- field bits<32> Word1;
-
- bits<11> dst;
- bits<3> bank_swizzle;
- bits<1> dst_rel;
- bits<1> clamp;
-
- bits<7> dst_sel = dst{6-0};
- bits<2> dst_chan = dst{10-9};
-
- let Word1{20-18} = bank_swizzle;
- let Word1{27-21} = dst_sel;
- let Word1{28} = dst_rel;
- let Word1{30-29} = dst_chan;
- let Word1{31} = clamp;
-}
-
-class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{
-
- bits<1> src0_abs;
- bits<1> src1_abs;
- bits<1> update_exec_mask;
- bits<1> update_pred;
- bits<1> write;
- bits<2> omod;
-
- let Word1{0} = src0_abs;
- let Word1{1} = src1_abs;
- let Word1{2} = update_exec_mask;
- let Word1{3} = update_pred;
- let Word1{4} = write;
- let Word1{6-5} = omod;
- let Word1{17-7} = alu_inst;
-}
-class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{
-
- bits<11> src2;
- bits<1> src2_rel;
- bits<1> src2_neg;
-
- bits<9> src2_sel = src2{8-0};
- bits<2> src2_chan = src2{10-9};
-
- let Word1{8-0} = src2_sel;
- let Word1{9} = src2_rel;
- let Word1{11-10} = src2_chan;
- let Word1{12} = src2_neg;
- let Word1{17-13} = alu_inst;
-}
-
-class VTX_WORD0 {
- field bits<32> Word0;
- bits<7> SRC_GPR;
- bits<5> VC_INST;
- bits<2> FETCH_TYPE;
- bits<1> FETCH_WHOLE_QUAD;
- bits<8> BUFFER_ID;
- bits<1> SRC_REL;
- bits<2> SRC_SEL_X;
- bits<6> MEGA_FETCH_COUNT;
-
- let Word0{4-0} = VC_INST;
- let Word0{6-5} = FETCH_TYPE;
- let Word0{7} = FETCH_WHOLE_QUAD;
- let Word0{15-8} = BUFFER_ID;
- let Word0{22-16} = SRC_GPR;
- let Word0{23} = SRC_REL;
- let Word0{25-24} = SRC_SEL_X;
- let Word0{31-26} = MEGA_FETCH_COUNT;
-}
-
-class VTX_WORD1_GPR {
- field bits<32> Word1;
- bits<7> DST_GPR;
- bits<1> DST_REL;
- bits<3> DST_SEL_X;
- bits<3> DST_SEL_Y;
- bits<3> DST_SEL_Z;
- bits<3> DST_SEL_W;
- bits<1> USE_CONST_FIELDS;
- bits<6> DATA_FORMAT;
- bits<2> NUM_FORMAT_ALL;
- bits<1> FORMAT_COMP_ALL;
- bits<1> SRF_MODE_ALL;
-
- let Word1{6-0} = DST_GPR;
- let Word1{7} = DST_REL;
- let Word1{8} = 0; // Reserved
- let Word1{11-9} = DST_SEL_X;
- let Word1{14-12} = DST_SEL_Y;
- let Word1{17-15} = DST_SEL_Z;
- let Word1{20-18} = DST_SEL_W;
- let Word1{21} = USE_CONST_FIELDS;
- let Word1{27-22} = DATA_FORMAT;
- let Word1{29-28} = NUM_FORMAT_ALL;
- let Word1{30} = FORMAT_COMP_ALL;
- let Word1{31} = SRF_MODE_ALL;
-}
-
-class TEX_WORD0 {
- field bits<32> Word0;
-
- bits<5> TEX_INST;
- bits<2> INST_MOD;
- bits<1> FETCH_WHOLE_QUAD;
- bits<8> RESOURCE_ID;
- bits<7> SRC_GPR;
- bits<1> SRC_REL;
- bits<1> ALT_CONST;
- bits<2> RESOURCE_INDEX_MODE;
- bits<2> SAMPLER_INDEX_MODE;
-
- let Word0{4-0} = TEX_INST;
- let Word0{6-5} = INST_MOD;
- let Word0{7} = FETCH_WHOLE_QUAD;
- let Word0{15-8} = RESOURCE_ID;
- let Word0{22-16} = SRC_GPR;
- let Word0{23} = SRC_REL;
- let Word0{24} = ALT_CONST;
- let Word0{26-25} = RESOURCE_INDEX_MODE;
- let Word0{28-27} = SAMPLER_INDEX_MODE;
-}
-
-class TEX_WORD1 {
- field bits<32> Word1;
-
- bits<7> DST_GPR;
- bits<1> DST_REL;
- bits<3> DST_SEL_X;
- bits<3> DST_SEL_Y;
- bits<3> DST_SEL_Z;
- bits<3> DST_SEL_W;
- bits<7> LOD_BIAS;
- bits<1> COORD_TYPE_X;
- bits<1> COORD_TYPE_Y;
- bits<1> COORD_TYPE_Z;
- bits<1> COORD_TYPE_W;
-
- let Word1{6-0} = DST_GPR;
- let Word1{7} = DST_REL;
- let Word1{11-9} = DST_SEL_X;
- let Word1{14-12} = DST_SEL_Y;
- let Word1{17-15} = DST_SEL_Z;
- let Word1{20-18} = DST_SEL_W;
- let Word1{27-21} = LOD_BIAS;
- let Word1{28} = COORD_TYPE_X;
- let Word1{29} = COORD_TYPE_Y;
- let Word1{30} = COORD_TYPE_Z;
- let Word1{31} = COORD_TYPE_W;
-}
-
-class TEX_WORD2 {
- field bits<32> Word2;
-
- bits<5> OFFSET_X;
- bits<5> OFFSET_Y;
- bits<5> OFFSET_Z;
- bits<5> SAMPLER_ID;
- bits<3> SRC_SEL_X;
- bits<3> SRC_SEL_Y;
- bits<3> SRC_SEL_Z;
- bits<3> SRC_SEL_W;
-
- let Word2{4-0} = OFFSET_X;
- let Word2{9-5} = OFFSET_Y;
- let Word2{14-10} = OFFSET_Z;
- let Word2{19-15} = SAMPLER_ID;
- let Word2{22-20} = SRC_SEL_X;
- let Word2{25-23} = SRC_SEL_Y;
- let Word2{28-26} = SRC_SEL_Z;
- let Word2{31-29} = SRC_SEL_W;
-}
-
-/*
-XXX: R600 subtarget uses a slightly different encoding than the other
-subtargets. We currently handle this in R600MCCodeEmitter, but we may
-want to use these instruction classes in the future.
-
-class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 {
-
- bits<1> fog_merge;
- bits<10> alu_inst;
-
- let Inst{37} = fog_merge;
- let Inst{39-38} = omod;
- let Inst{49-40} = alu_inst;
-}
-
-class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 {
-
- bits<11> alu_inst;
-
- let Inst{38-37} = omod;
- let Inst{49-39} = alu_inst;
-}
-*/
def R600_Pred : PredicateOperand<i32, (ops R600_Predicate),
(ops PRED_SEL_OFF)>;
@@ -358,7 +97,7 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
BANK_SWIZZLE:$bank_swizzle),
!strconcat(" ", opName,
- "$last$clamp $dst$write$dst_rel$omod, "
+ "$clamp $last $dst$write$dst_rel$omod, "
"$src0_neg$src0_abs$src0$src0_abs$src0_rel, "
"$pred_sel $bank_swizzle"),
pattern,
@@ -374,7 +113,9 @@ class R600_1OP <bits<11> inst, string opName, list<dag> pattern,
let update_pred = 0;
let HasNativeOperands = 1;
let Op1 = 1;
+ let ALUInst = 1;
let DisableEncoding = "$literal";
+ let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -386,7 +127,7 @@ class R600_1OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
[(set R600_Reg32:$dst, (node R600_Reg32:$src0))]
>;
-// If you add our change the operands for R600_2OP instructions, you must
+// If you add or change the operands for R600_2OP instructions, you must
// also update the R600Op2OperandIndex::ROI enum in R600Defines.h,
// R600InstrInfo::buildDefaultInstruction(), and R600InstrInfo::getOperandIdx().
class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
@@ -399,7 +140,7 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
BANK_SWIZZLE:$bank_swizzle),
!strconcat(" ", opName,
- "$last$clamp $update_exec_mask$update_pred$dst$write$dst_rel$omod, "
+ "$clamp $last $update_exec_mask$update_pred$dst$write$dst_rel$omod, "
"$src0_neg$src0_abs$src0$src0_abs$src0_rel, "
"$src1_neg$src1_abs$src1$src1_abs$src1_rel, "
"$pred_sel $bank_swizzle"),
@@ -410,7 +151,9 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
let HasNativeOperands = 1;
let Op2 = 1;
+ let ALUInst = 1;
let DisableEncoding = "$literal";
+ let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -436,7 +179,7 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern,
R600_Reg32:$src2, NEG:$src2_neg, REL:$src2_rel, SEL:$src2_sel,
LAST:$last, R600_Pred:$pred_sel, LITERAL:$literal,
BANK_SWIZZLE:$bank_swizzle),
- !strconcat(" ", opName, "$last$clamp $dst$dst_rel, "
+ !strconcat(" ", opName, "$clamp $last $dst$dst_rel, "
"$src0_neg$src0$src0_rel, "
"$src1_neg$src1$src1_rel, "
"$src2_neg$src2$src2_rel, "
@@ -450,6 +193,8 @@ class R600_3OP <bits<5> inst, string opName, list<dag> pattern,
let HasNativeOperands = 1;
let DisableEncoding = "$literal";
let Op3 = 1;
+ let UseNamedOperandTable = 1;
+ let ALUInst = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -463,38 +208,7 @@ class R600_REDUCTION <bits<11> inst, dag ins, string asm, list<dag> pattern,
pattern,
itin>;
-class R600_TEX <bits<11> inst, string opName, list<dag> pattern,
- InstrItinClass itin = AnyALU> :
- InstR600 <(outs R600_Reg128:$DST_GPR),
- (ins R600_Reg128:$SRC_GPR, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID, i32imm:$textureTarget),
- !strconcat(opName, "$DST_GPR, $SRC_GPR, $RESOURCE_ID, $SAMPLER_ID, $textureTarget"),
- pattern,
- itin>, TEX_WORD0, TEX_WORD1, TEX_WORD2 {
- let Inst{31-0} = Word0;
- let Inst{63-32} = Word1;
-
- let TEX_INST = inst{4-0};
- let SRC_REL = 0;
- let DST_REL = 0;
- let DST_SEL_X = 0;
- let DST_SEL_Y = 1;
- let DST_SEL_Z = 2;
- let DST_SEL_W = 3;
- let LOD_BIAS = 0;
-
- let INST_MOD = 0;
- let FETCH_WHOLE_QUAD = 0;
- let ALT_CONST = 0;
- let SAMPLER_INDEX_MODE = 0;
- let RESOURCE_INDEX_MODE = 0;
-
- let COORD_TYPE_X = 0;
- let COORD_TYPE_Y = 0;
- let COORD_TYPE_Z = 0;
- let COORD_TYPE_W = 0;
-
- let TEXInst = 1;
- }
+
} // End mayLoad = 1, mayStore = 0, hasSideEffects = 0
@@ -515,7 +229,7 @@ def TEX_RECT : PatLeaf<
def TEX_ARRAY : PatLeaf<
(imm),
[{uint32_t TType = (uint32_t)N->getZExtValue();
- return TType == 9 || TType == 10 || TType == 15 || TType == 16;
+ return TType == 9 || TType == 10 || TType == 16;
}]
>;
@@ -526,76 +240,115 @@ def TEX_SHADOW_ARRAY : PatLeaf<
}]
>;
-class EG_CF_RAT <bits <8> cf_inst, bits <6> rat_inst, bits<4> rat_id, dag outs,
- dag ins, string asm, list<dag> pattern> :
- InstR600ISA <outs, ins, asm, pattern> {
- bits<7> RW_GPR;
- bits<7> INDEX_GPR;
-
- bits<2> RIM;
- bits<2> TYPE;
- bits<1> RW_REL;
- bits<2> ELEM_SIZE;
-
- bits<12> ARRAY_SIZE;
- bits<4> COMP_MASK;
- bits<4> BURST_COUNT;
- bits<1> VPM;
- bits<1> eop;
- bits<1> MARK;
- bits<1> BARRIER;
-
- // CF_ALLOC_EXPORT_WORD0_RAT
- let Inst{3-0} = rat_id;
- let Inst{9-4} = rat_inst;
- let Inst{10} = 0; // Reserved
- let Inst{12-11} = RIM;
- let Inst{14-13} = TYPE;
- let Inst{21-15} = RW_GPR;
- let Inst{22} = RW_REL;
- let Inst{29-23} = INDEX_GPR;
- let Inst{31-30} = ELEM_SIZE;
-
- // CF_ALLOC_EXPORT_WORD1_BUF
- let Inst{43-32} = ARRAY_SIZE;
- let Inst{47-44} = COMP_MASK;
- let Inst{51-48} = BURST_COUNT;
- let Inst{52} = VPM;
- let Inst{53} = eop;
- let Inst{61-54} = cf_inst;
- let Inst{62} = MARK;
- let Inst{63} = BARRIER;
+def TEX_MSAA : PatLeaf<
+ (imm),
+ [{uint32_t TType = (uint32_t)N->getZExtValue();
+ return TType == 14;
+ }]
+>;
+
+def TEX_ARRAY_MSAA : PatLeaf<
+ (imm),
+ [{uint32_t TType = (uint32_t)N->getZExtValue();
+ return TType == 15;
+ }]
+>;
+
+class EG_CF_RAT <bits <8> cfinst, bits <6> ratinst, bits<4> ratid, bits<4> mask,
+ dag outs, dag ins, string asm, list<dag> pattern> :
+ InstR600ISA <outs, ins, asm, pattern>,
+ CF_ALLOC_EXPORT_WORD0_RAT, CF_ALLOC_EXPORT_WORD1_BUF {
+
+ let rat_id = ratid;
+ let rat_inst = ratinst;
+ let rim = 0;
+ // XXX: Have a separate instruction for non-indexed writes.
+ let type = 1;
+ let rw_rel = 0;
+ let elem_size = 0;
+
+ let array_size = 0;
+ let comp_mask = mask;
+ let burst_count = 0;
+ let vpm = 0;
+ let cf_inst = cfinst;
+ let mark = 0;
+ let barrier = 1;
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+ let IsExport = 1;
+
+}
+
+class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : InstR600ISA <outs, (ins MEMxi:$src_gpr), name, pattern>,
+ VTX_WORD1_GPR {
+
+ // Static fields
+ let DST_REL = 0;
+  // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL,
+  // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored.
+  // However, based on my testing, if USE_CONST_FIELDS is set then all of
+  // these fields need to be set to 0.
+ let USE_CONST_FIELDS = 0;
+ let NUM_FORMAT_ALL = 1;
+ let FORMAT_COMP_ALL = 0;
+ let SRF_MODE_ALL = 0;
+
+ let Inst{63-32} = Word1;
+ // LLVM can only encode 64-bit instructions, so these fields are manually
+ // encoded in R600CodeEmitter
+ //
+ // bits<16> OFFSET;
+ // bits<2> ENDIAN_SWAP = 0;
+ // bits<1> CONST_BUF_NO_STRIDE = 0;
+ // bits<1> MEGA_FETCH = 0;
+ // bits<1> ALT_CONST = 0;
+ // bits<2> BUFFER_INDEX_MODE = 0;
+
+  // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
+  // is done in R600CodeEmitter).
+ //
+ // Inst{79-64} = OFFSET;
+ // Inst{81-80} = ENDIAN_SWAP;
+ // Inst{82} = CONST_BUF_NO_STRIDE;
+ // Inst{83} = MEGA_FETCH;
+ // Inst{84} = ALT_CONST;
+ // Inst{86-85} = BUFFER_INDEX_MODE;
+ // Inst{95-86} = 0; Reserved
+
+ // VTX_WORD3 (Padding)
+ //
+ // Inst{127-96} = 0;
+
+ let VTXInst = 1;
}
class LoadParamFrag <PatFrag load_type> : PatFrag <
(ops node:$ptr), (load_type node:$ptr),
- [{ return isParamLoad(dyn_cast<LoadSDNode>(N)); }]
+ [{ return isConstantLoad(dyn_cast<LoadSDNode>(N), 0); }]
>;
def load_param : LoadParamFrag<load>;
-def load_param_zexti8 : LoadParamFrag<zextloadi8>;
-def load_param_zexti16 : LoadParamFrag<zextloadi16>;
-
-def isR600 : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD4XXX">;
-def isR700 : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD4XXX &&"
- "Subtarget.device()->getDeviceFlag()"
- ">= OCL_DEVICE_RV710">;
+def load_param_exti8 : LoadParamFrag<az_extloadi8>;
+def load_param_exti16 : LoadParamFrag<az_extloadi16>;
+
+def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">;
+def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">;
def isEG : Predicate<
- "Subtarget.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX && "
- "Subtarget.device()->getGeneration() < AMDGPUDeviceInfo::HD7XXX && "
- "Subtarget.device()->getDeviceFlag() != OCL_DEVICE_CAYMAN">;
+ "Subtarget.getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
+ "Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+ "!Subtarget.hasCaymanISA()">;
-def isCayman : Predicate<"Subtarget.device()"
- "->getDeviceFlag() == OCL_DEVICE_CAYMAN">;
-def isEGorCayman : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD5XXX"
- "|| Subtarget.device()->getGeneration() =="
- "AMDGPUDeviceInfo::HD6XXX">;
+def isCayman : Predicate<"Subtarget.hasCaymanISA()">;
+def isEGorCayman : Predicate<"Subtarget.getGeneration() == "
+ "AMDGPUSubtarget::EVERGREEN"
+ "|| Subtarget.getGeneration() =="
+ "AMDGPUSubtarget::NORTHERN_ISLANDS">;
def isR600toCayman : Predicate<
- "Subtarget.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX">;
+ "Subtarget.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
//===----------------------------------------------------------------------===//
// R600 SDNodes
@@ -603,13 +356,13 @@ def isR600toCayman : Predicate<
def INTERP_PAIR_XY : AMDGPUShaderInst <
(outs R600_TReg32_X:$dst0, R600_TReg32_Y:$dst1),
- (ins i32imm:$src0, R600_Reg32:$src1, R600_Reg32:$src2),
+ (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2),
"INTERP_PAIR_XY $src0 $src1 $src2 : $dst0 dst1",
[]>;
def INTERP_PAIR_ZW : AMDGPUShaderInst <
(outs R600_TReg32_Z:$dst0, R600_TReg32_W:$dst1),
- (ins i32imm:$src0, R600_Reg32:$src1, R600_Reg32:$src2),
+ (ins i32imm:$src0, R600_TReg32_Y:$src1, R600_TReg32_X:$src2),
"INTERP_PAIR_ZW $src0 $src1 $src2 : $dst0 dst1",
[]>;
@@ -618,6 +371,44 @@ def CONST_ADDRESS: SDNode<"AMDGPUISD::CONST_ADDRESS",
[SDNPVariadic]
>;
+def DOT4 : SDNode<"AMDGPUISD::DOT4",
+ SDTypeProfile<1, 8, [SDTCisFP<0>, SDTCisVT<1, f32>, SDTCisVT<2, f32>,
+ SDTCisVT<3, f32>, SDTCisVT<4, f32>, SDTCisVT<5, f32>,
+ SDTCisVT<6, f32>, SDTCisVT<7, f32>, SDTCisVT<8, f32>]>,
+ []
+>;
+
+def COS_HW : SDNode<"AMDGPUISD::COS_HW",
+ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>
+>;
+
+def SIN_HW : SDNode<"AMDGPUISD::SIN_HW",
+ SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>
+>;
+
+def TEXTURE_FETCH_Type : SDTypeProfile<1, 19, [SDTCisFP<0>]>;
+
+def TEXTURE_FETCH: SDNode<"AMDGPUISD::TEXTURE_FETCH", TEXTURE_FETCH_Type, []>;
+
+multiclass TexPattern<bits<32> TextureOp, Instruction inst, ValueType vt = v4f32> {
+def : Pat<(TEXTURE_FETCH (i32 TextureOp), vt:$SRC_GPR,
+ (i32 imm:$srcx), (i32 imm:$srcy), (i32 imm:$srcz), (i32 imm:$srcw),
+ (i32 imm:$offsetx), (i32 imm:$offsety), (i32 imm:$offsetz),
+ (i32 imm:$DST_SEL_X), (i32 imm:$DST_SEL_Y), (i32 imm:$DST_SEL_Z),
+ (i32 imm:$DST_SEL_W),
+ (i32 imm:$RESOURCE_ID), (i32 imm:$SAMPLER_ID),
+ (i32 imm:$COORD_TYPE_X), (i32 imm:$COORD_TYPE_Y), (i32 imm:$COORD_TYPE_Z),
+ (i32 imm:$COORD_TYPE_W)),
+ (inst R600_Reg128:$SRC_GPR,
+ imm:$srcx, imm:$srcy, imm:$srcz, imm:$srcw,
+ imm:$offsetx, imm:$offsety, imm:$offsetz,
+ imm:$DST_SEL_X, imm:$DST_SEL_Y, imm:$DST_SEL_Z,
+ imm:$DST_SEL_W,
+ imm:$RESOURCE_ID, imm:$SAMPLER_ID,
+ imm:$COORD_TYPE_X, imm:$COORD_TYPE_Y, imm:$COORD_TYPE_Z,
+ imm:$COORD_TYPE_W)>;
+}
+
//===----------------------------------------------------------------------===//
// Interpolation Instructions
//===----------------------------------------------------------------------===//
@@ -626,7 +417,7 @@ def INTERP_VEC_LOAD : AMDGPUShaderInst <
(outs R600_Reg128:$dst),
(ins i32imm:$src0),
"INTERP_LOAD $src0 : $dst",
- []>;
+ [(set R600_Reg128:$dst, (int_R600_interp_const imm:$src0))]>;
def INTERP_XY : R600_2OP <0xD6, "INTERP_XY", []> {
let bank_swizzle = 5;
@@ -753,13 +544,14 @@ let usesCustomInserter = 1, isNotDuplicable = 1 in {
class ExportSwzInst : InstR600ISA<(
outs),
(ins R600_Reg128:$gpr, i32imm:$type, i32imm:$arraybase,
- i32imm:$sw_x, i32imm:$sw_y, i32imm:$sw_z, i32imm:$sw_w, i32imm:$inst,
+ RSel:$sw_x, RSel:$sw_y, RSel:$sw_z, RSel:$sw_w, i32imm:$inst,
i32imm:$eop),
- !strconcat("EXPORT", " $gpr"),
+ !strconcat("EXPORT", " $gpr.$sw_x$sw_y$sw_z$sw_w"),
[]>, ExportWord0, ExportSwzWord1 {
let elem_size = 3;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
+ let IsExport = 1;
}
} // End usesCustomInserter = 1
@@ -773,47 +565,13 @@ class ExportBufInst : InstR600ISA<(
let elem_size = 0;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
+ let IsExport = 1;
}
//===----------------------------------------------------------------------===//
// Control Flow Instructions
//===----------------------------------------------------------------------===//
-class CF_ALU_WORD0 {
- field bits<32> Word0;
-
- bits<22> ADDR;
- bits<4> KCACHE_BANK0;
- bits<4> KCACHE_BANK1;
- bits<2> KCACHE_MODE0;
-
- let Word0{21-0} = ADDR;
- let Word0{25-22} = KCACHE_BANK0;
- let Word0{29-26} = KCACHE_BANK1;
- let Word0{31-30} = KCACHE_MODE0;
-}
-
-class CF_ALU_WORD1 {
- field bits<32> Word1;
-
- bits<2> KCACHE_MODE1;
- bits<8> KCACHE_ADDR0;
- bits<8> KCACHE_ADDR1;
- bits<7> COUNT;
- bits<1> ALT_CONST;
- bits<4> CF_INST;
- bits<1> WHOLE_QUAD_MODE;
- bits<1> BARRIER;
-
- let Word1{1-0} = KCACHE_MODE1;
- let Word1{9-2} = KCACHE_ADDR0;
- let Word1{17-10} = KCACHE_ADDR1;
- let Word1{24-18} = COUNT;
- let Word1{25} = ALT_CONST;
- let Word1{29-26} = CF_INST;
- let Word1{30} = WHOLE_QUAD_MODE;
- let Word1{31} = BARRIER;
-}
def KCACHE : InstFlag<"printKCache">;
@@ -821,7 +579,7 @@ class ALU_CLAUSE<bits<4> inst, string OpName> : AMDGPUInst <(outs),
(ins i32imm:$ADDR, i32imm:$KCACHE_BANK0, i32imm:$KCACHE_BANK1,
KCACHE:$KCACHE_MODE0, KCACHE:$KCACHE_MODE1,
i32imm:$KCACHE_ADDR0, i32imm:$KCACHE_ADDR1,
-i32imm:$COUNT),
+i32imm:$COUNT, i32imm:$Enabled),
!strconcat(OpName, " $COUNT, @$ADDR, "
"KC0[$KCACHE_MODE0], KC1[$KCACHE_MODE1]"),
[] >, CF_ALU_WORD0, CF_ALU_WORD1 {
@@ -831,6 +589,7 @@ i32imm:$COUNT),
let ALT_CONST = 0;
let WHOLE_QUAD_MODE = 0;
let BARRIER = 1;
+ let UseNamedOperandTable = 1;
let Inst{31-0} = Word0;
let Inst{63-32} = Word1;
@@ -844,45 +603,19 @@ class CF_WORD0_R600 {
let Word0 = ADDR;
}
-class CF_WORD1_R600 {
- field bits<32> Word1;
-
- bits<3> POP_COUNT;
- bits<5> CF_CONST;
- bits<2> COND;
- bits<3> COUNT;
- bits<6> CALL_COUNT;
- bits<1> COUNT_3;
- bits<1> END_OF_PROGRAM;
- bits<1> VALID_PIXEL_MODE;
- bits<7> CF_INST;
- bits<1> WHOLE_QUAD_MODE;
- bits<1> BARRIER;
-
- let Word1{2-0} = POP_COUNT;
- let Word1{7-3} = CF_CONST;
- let Word1{9-8} = COND;
- let Word1{12-10} = COUNT;
- let Word1{18-13} = CALL_COUNT;
- let Word1{19} = COUNT_3;
- let Word1{21} = END_OF_PROGRAM;
- let Word1{22} = VALID_PIXEL_MODE;
- let Word1{29-23} = CF_INST;
- let Word1{30} = WHOLE_QUAD_MODE;
- let Word1{31} = BARRIER;
-}
-
class CF_CLAUSE_R600 <bits<7> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs),
ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 {
field bits<64> Inst;
+ bits<4> CNT;
let CF_INST = inst;
let BARRIER = 1;
let CF_CONST = 0;
let VALID_PIXEL_MODE = 0;
let COND = 0;
+ let COUNT = CNT{2-0};
let CALL_COUNT = 0;
- let COUNT_3 = 0;
+ let COUNT_3 = CNT{3};
let END_OF_PROGRAM = 0;
let WHOLE_QUAD_MODE = 0;
@@ -890,38 +623,6 @@ ins, AsmPrint, [] >, CF_WORD0_R600, CF_WORD1_R600 {
let Inst{63-32} = Word1;
}
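
The CNT change above lets a single 4-bit operand drive both encoded fields: R600 CF words store the clause count as a 3-bit COUNT plus a separate COUNT_3 bit. An equivalent C++ sketch of the split (hypothetical helper):

// Cnt is the 4-bit clause count (0..15) carried by the CNT operand.
static void splitCount(unsigned Cnt, unsigned &Count, unsigned &Count3) {
  Count  = Cnt & 0x7;        // CNT{2-0} -> COUNT (low three bits)
  Count3 = (Cnt >> 3) & 0x1; // CNT{3}   -> COUNT_3 (high bit)
}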
-class CF_WORD0_EG {
- field bits<32> Word0;
-
- bits<24> ADDR;
- bits<3> JUMPTABLE_SEL;
-
- let Word0{23-0} = ADDR;
- let Word0{26-24} = JUMPTABLE_SEL;
-}
-
-class CF_WORD1_EG {
- field bits<32> Word1;
-
- bits<3> POP_COUNT;
- bits<5> CF_CONST;
- bits<2> COND;
- bits<6> COUNT;
- bits<1> VALID_PIXEL_MODE;
- bits<1> END_OF_PROGRAM;
- bits<8> CF_INST;
- bits<1> BARRIER;
-
- let Word1{2-0} = POP_COUNT;
- let Word1{7-3} = CF_CONST;
- let Word1{9-8} = COND;
- let Word1{15-10} = COUNT;
- let Word1{20} = VALID_PIXEL_MODE;
- let Word1{21} = END_OF_PROGRAM;
- let Word1{29-22} = CF_INST;
- let Word1{31} = BARRIER;
-}
-
class CF_CLAUSE_EG <bits<8> inst, dag ins, string AsmPrint> : AMDGPUInst <(outs),
ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG {
field bits<64> Inst;
@@ -940,6 +641,7 @@ ins, AsmPrint, [] >, CF_WORD0_EG, CF_WORD1_EG {
def CF_ALU : ALU_CLAUSE<8, "ALU">;
def CF_ALU_PUSH_BEFORE : ALU_CLAUSE<9, "ALU_PUSH_BEFORE">;
+def CF_ALU_POP_AFTER : ALU_CLAUSE<10, "ALU_POP_AFTER">;
def FETCH_CLAUSE : AMDGPUInst <(outs),
(ins i32imm:$addr), "Fetch clause starting at $addr:", [] > {
@@ -987,42 +689,42 @@ def MIN : R600_2OP_Helper <0x4, "MIN", AMDGPUfmin>;
// XXX: Use the defs in TargetSelectionDAG.td instead of intrinsics.
def SETE : R600_2OP <
0x08, "SETE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_EQ))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OEQ))]
>;
def SGT : R600_2OP <
0x09, "SETGT",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_GT))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OGT))]
>;
def SGE : R600_2OP <
0xA, "SETGE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_GE))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_OGE))]
>;
def SNE : R600_2OP <
0xB, "SETNE",
- [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_NE))]
+ [(set f32:$dst, (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_UNE))]
>;
def SETE_DX10 : R600_2OP <
0xC, "SETE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_EQ))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OEQ))]
>;
def SETGT_DX10 : R600_2OP <
0xD, "SETGT_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_GT))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGT))]
>;
def SETGE_DX10 : R600_2OP <
0xE, "SETGE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_GE))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_OGE))]
>;
def SETNE_DX10 : R600_2OP <
0xF, "SETNE_DX10",
- [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_NE))]
+ [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE))]
>;
def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
@@ -1120,104 +822,86 @@ def CNDE_INT : R600_3OP <
def CNDGE_INT : R600_3OP <
0x1E, "CNDGE_INT",
- [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_GE))]
+ [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_SGE))]
>;
def CNDGT_INT : R600_3OP <
0x1D, "CNDGT_INT",
- [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_GT))]
+ [(set i32:$dst, (selectcc i32:$src0, 0, i32:$src1, i32:$src2, COND_SGT))]
>;
//===----------------------------------------------------------------------===//
// Texture instructions
//===----------------------------------------------------------------------===//
-def TEX_LD : R600_TEX <
- 0x03, "TEX_LD",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txf v4f32:$SRC_GPR,
- imm:$OFFSET_X, imm:$OFFSET_Y, imm:$OFFSET_Z, imm:$RESOURCE_ID,
- imm:$SAMPLER_ID, imm:$textureTarget))]
-> {
-let AsmString = "TEX_LD $DST_GPR, $SRC_GPR, $OFFSET_X, $OFFSET_Y, $OFFSET_Z,"
- "$RESOURCE_ID, $SAMPLER_ID, $textureTarget";
-let InOperandList = (ins R600_Reg128:$SRC_GPR, i32imm:$OFFSET_X,
- i32imm:$OFFSET_Y, i32imm:$OFFSET_Z, i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID,
- i32imm:$textureTarget);
-}
-
-def TEX_GET_TEXTURE_RESINFO : R600_TEX <
- 0x04, "TEX_GET_TEXTURE_RESINFO",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txq v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
-
-def TEX_GET_GRADIENTS_H : R600_TEX <
- 0x07, "TEX_GET_GRADIENTS_H",
- [(set v4f32:$DST_GPR, (int_AMDGPU_ddx v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
-
-def TEX_GET_GRADIENTS_V : R600_TEX <
- 0x08, "TEX_GET_GRADIENTS_V",
- [(set v4f32:$DST_GPR, (int_AMDGPU_ddy v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
-
-def TEX_SET_GRADIENTS_H : R600_TEX <
- 0x0B, "TEX_SET_GRADIENTS_H",
- []
->;
-
-def TEX_SET_GRADIENTS_V : R600_TEX <
- 0x0C, "TEX_SET_GRADIENTS_V",
- []
->;
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
-def TEX_SAMPLE : R600_TEX <
- 0x10, "TEX_SAMPLE",
- [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
+class R600_TEX <bits<11> inst, string opName> :
+ InstR600 <(outs R600_Reg128:$DST_GPR),
+ (ins R600_Reg128:$SRC_GPR,
+ RSel:$srcx, RSel:$srcy, RSel:$srcz, RSel:$srcw,
+ i32imm:$offsetx, i32imm:$offsety, i32imm:$offsetz,
+ RSel:$DST_SEL_X, RSel:$DST_SEL_Y, RSel:$DST_SEL_Z, RSel:$DST_SEL_W,
+ i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID,
+ CT:$COORD_TYPE_X, CT:$COORD_TYPE_Y, CT:$COORD_TYPE_Z,
+ CT:$COORD_TYPE_W),
+ !strconcat(opName,
+ " $DST_GPR.$DST_SEL_X$DST_SEL_Y$DST_SEL_Z$DST_SEL_W, "
+ "$SRC_GPR.$srcx$srcy$srcz$srcw "
+ "RID:$RESOURCE_ID SID:$SAMPLER_ID "
+ "CT:$COORD_TYPE_X$COORD_TYPE_Y$COORD_TYPE_Z$COORD_TYPE_W"),
+ [],
+ NullALU>, TEX_WORD0, TEX_WORD1, TEX_WORD2 {
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
-def TEX_SAMPLE_C : R600_TEX <
- 0x18, "TEX_SAMPLE_C",
- [(set v4f32:$DST_GPR, (int_AMDGPU_tex v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))]
->;
+ let TEX_INST = inst{4-0};
+ let SRC_REL = 0;
+ let DST_REL = 0;
+ let LOD_BIAS = 0;
-def TEX_SAMPLE_L : R600_TEX <
- 0x11, "TEX_SAMPLE_L",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
+ let INST_MOD = 0;
+ let FETCH_WHOLE_QUAD = 0;
+ let ALT_CONST = 0;
+ let SAMPLER_INDEX_MODE = 0;
+ let RESOURCE_INDEX_MODE = 0;
-def TEX_SAMPLE_C_L : R600_TEX <
- 0x19, "TEX_SAMPLE_C_L",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txl v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))]
->;
+ let TEXInst = 1;
+}
-def TEX_SAMPLE_LB : R600_TEX <
- 0x12, "TEX_SAMPLE_LB",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, imm:$textureTarget))]
->;
+} // End mayLoad = 0, mayStore = 0, hasSideEffects = 0
-def TEX_SAMPLE_C_LB : R600_TEX <
- 0x1A, "TEX_SAMPLE_C_LB",
- [(set v4f32:$DST_GPR, (int_AMDGPU_txb v4f32:$SRC_GPR,
- imm:$RESOURCE_ID, imm:$SAMPLER_ID, TEX_SHADOW:$textureTarget))]
->;
-def TEX_SAMPLE_G : R600_TEX <
- 0x14, "TEX_SAMPLE_G",
- []
->;
-def TEX_SAMPLE_C_G : R600_TEX <
- 0x1C, "TEX_SAMPLE_C_G",
- []
->;
+def TEX_SAMPLE : R600_TEX <0x10, "TEX_SAMPLE">;
+def TEX_SAMPLE_C : R600_TEX <0x18, "TEX_SAMPLE_C">;
+def TEX_SAMPLE_L : R600_TEX <0x11, "TEX_SAMPLE_L">;
+def TEX_SAMPLE_C_L : R600_TEX <0x19, "TEX_SAMPLE_C_L">;
+def TEX_SAMPLE_LB : R600_TEX <0x12, "TEX_SAMPLE_LB">;
+def TEX_SAMPLE_C_LB : R600_TEX <0x1A, "TEX_SAMPLE_C_LB">;
+def TEX_LD : R600_TEX <0x03, "TEX_LD">;
+def TEX_LDPTR : R600_TEX <0x03, "TEX_LDPTR"> {
+ let INST_MOD = 1;
+}
+def TEX_GET_TEXTURE_RESINFO : R600_TEX <0x04, "TEX_GET_TEXTURE_RESINFO">;
+def TEX_GET_GRADIENTS_H : R600_TEX <0x07, "TEX_GET_GRADIENTS_H">;
+def TEX_GET_GRADIENTS_V : R600_TEX <0x08, "TEX_GET_GRADIENTS_V">;
+def TEX_SET_GRADIENTS_H : R600_TEX <0x0B, "TEX_SET_GRADIENTS_H">;
+def TEX_SET_GRADIENTS_V : R600_TEX <0x0C, "TEX_SET_GRADIENTS_V">;
+def TEX_SAMPLE_G : R600_TEX <0x14, "TEX_SAMPLE_G">;
+def TEX_SAMPLE_C_G : R600_TEX <0x1C, "TEX_SAMPLE_C_G">;
+
+defm : TexPattern<0, TEX_SAMPLE>;
+defm : TexPattern<1, TEX_SAMPLE_C>;
+defm : TexPattern<2, TEX_SAMPLE_L>;
+defm : TexPattern<3, TEX_SAMPLE_C_L>;
+defm : TexPattern<4, TEX_SAMPLE_LB>;
+defm : TexPattern<5, TEX_SAMPLE_C_LB>;
+defm : TexPattern<6, TEX_LD, v4i32>;
+defm : TexPattern<7, TEX_GET_TEXTURE_RESINFO, v4i32>;
+defm : TexPattern<8, TEX_GET_GRADIENTS_H>;
+defm : TexPattern<9, TEX_GET_GRADIENTS_V>;
+defm : TexPattern<10, TEX_LDPTR, v4i32>;
//===----------------------------------------------------------------------===//
// Helper classes for common instructions
@@ -1240,41 +924,82 @@ class MULADD_IEEE_Common <bits<5> inst> : R600_3OP <
class CNDE_Common <bits<5> inst> : R600_3OP <
inst, "CNDE",
- [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_EQ))]
+ [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OEQ))]
>;
class CNDGT_Common <bits<5> inst> : R600_3OP <
inst, "CNDGT",
- [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GT))]
->;
+ [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OGT))]
+> {
+ let Itinerary = VecALU;
+}
class CNDGE_Common <bits<5> inst> : R600_3OP <
inst, "CNDGE",
- [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_GE))]
->;
-
-multiclass DOT4_Common <bits<11> inst> {
+ [(set f32:$dst, (selectcc f32:$src0, FP_ZERO, f32:$src1, f32:$src2, COND_OGE))]
+> {
+ let Itinerary = VecALU;
+}
+
+
+let isCodeGenOnly = 1, isPseudo = 1, Namespace = "AMDGPU" in {
+class R600_VEC2OP<list<dag> pattern> : InstR600 <(outs R600_Reg32:$dst), (ins
+// Slot X
+ UEM:$update_exec_mask_X, UP:$update_pred_X, WRITE:$write_X,
+ OMOD:$omod_X, REL:$dst_rel_X, CLAMP:$clamp_X,
+ R600_TReg32_X:$src0_X, NEG:$src0_neg_X, REL:$src0_rel_X, ABS:$src0_abs_X, SEL:$src0_sel_X,
+ R600_TReg32_X:$src1_X, NEG:$src1_neg_X, REL:$src1_rel_X, ABS:$src1_abs_X, SEL:$src1_sel_X,
+ R600_Pred:$pred_sel_X,
+// Slot Y
+ UEM:$update_exec_mask_Y, UP:$update_pred_Y, WRITE:$write_Y,
+ OMOD:$omod_Y, REL:$dst_rel_Y, CLAMP:$clamp_Y,
+ R600_TReg32_Y:$src0_Y, NEG:$src0_neg_Y, REL:$src0_rel_Y, ABS:$src0_abs_Y, SEL:$src0_sel_Y,
+ R600_TReg32_Y:$src1_Y, NEG:$src1_neg_Y, REL:$src1_rel_Y, ABS:$src1_abs_Y, SEL:$src1_sel_Y,
+ R600_Pred:$pred_sel_Y,
+// Slot Z
+ UEM:$update_exec_mask_Z, UP:$update_pred_Z, WRITE:$write_Z,
+ OMOD:$omod_Z, REL:$dst_rel_Z, CLAMP:$clamp_Z,
+ R600_TReg32_Z:$src0_Z, NEG:$src0_neg_Z, REL:$src0_rel_Z, ABS:$src0_abs_Z, SEL:$src0_sel_Z,
+ R600_TReg32_Z:$src1_Z, NEG:$src1_neg_Z, REL:$src1_rel_Z, ABS:$src1_abs_Z, SEL:$src1_sel_Z,
+ R600_Pred:$pred_sel_Z,
+// Slot W
+ UEM:$update_exec_mask_W, UP:$update_pred_W, WRITE:$write_W,
+ OMOD:$omod_W, REL:$dst_rel_W, CLAMP:$clamp_W,
+ R600_TReg32_W:$src0_W, NEG:$src0_neg_W, REL:$src0_rel_W, ABS:$src0_abs_W, SEL:$src0_sel_W,
+ R600_TReg32_W:$src1_W, NEG:$src1_neg_W, REL:$src1_rel_W, ABS:$src1_abs_W, SEL:$src1_sel_W,
+ R600_Pred:$pred_sel_W,
+ LITERAL:$literal0, LITERAL:$literal1),
+ "",
+ pattern,
+ AnyALU> {
- def _pseudo : R600_REDUCTION <inst,
- (ins R600_Reg128:$src0, R600_Reg128:$src1),
- "DOT4 $dst $src0, $src1",
- [(set f32:$dst, (int_AMDGPU_dp4 v4f32:$src0, v4f32:$src1))]
- >;
+ let UseNamedOperandTable = 1;
- def _real : R600_2OP <inst, "DOT4", []>;
}
+}
+
+def DOT_4 : R600_VEC2OP<[(set R600_Reg32:$dst, (DOT4
+ R600_TReg32_X:$src0_X, R600_TReg32_X:$src1_X,
+ R600_TReg32_Y:$src0_Y, R600_TReg32_Y:$src1_Y,
+ R600_TReg32_Z:$src0_Z, R600_TReg32_Z:$src1_Z,
+ R600_TReg32_W:$src0_W, R600_TReg32_W:$src1_W))]>;
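For reference, the DOT4 node selected here reduces two four-component vectors to a single scalar, which the hardware then broadcasts to every lane. A minimal C++ sketch of the assumed semantics (illustrative only, not part of the patch):

    // Scalar reference for the DOT4 reduction: acc = sum over i of a[i]*b[i].
    float dot4(const float a[4], const float b[4]) {
      float Acc = 0.0f;
      for (int i = 0; i < 4; ++i)
        Acc += a[i] * b[i];
      return Acc;
    }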
+
+
+class DOT4_Common <bits<11> inst> : R600_2OP <inst, "DOT4", []>;
+
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
multiclass CUBE_Common <bits<11> inst> {
def _pseudo : InstR600 <
(outs R600_Reg128:$dst),
- (ins R600_Reg128:$src),
- "CUBE $dst $src",
- [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src))],
+ (ins R600_Reg128:$src0),
+ "CUBE $dst $src0",
+ [(set v4f32:$dst, (int_AMDGPU_cube v4f32:$src0))],
VecALU
> {
let isPseudo = 1;
+ let UseNamedOperandTable = 1;
}
def _real : R600_2OP <inst, "CUBE", []>;
@@ -1284,35 +1009,30 @@ multiclass CUBE_Common <bits<11> inst> {
class EXP_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
inst, "EXP_IEEE", fexp2
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class FLT_TO_INT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "FLT_TO_INT", fp_to_sint
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class INT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "INT_TO_FLT", sint_to_fp
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class FLT_TO_UINT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "FLT_TO_UINT", fp_to_uint
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class UINT_TO_FLT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "UINT_TO_FLT", uint_to_fp
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
@@ -1323,7 +1043,6 @@ class LOG_CLAMPED_Common <bits<11> inst> : R600_1OP <
class LOG_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
inst, "LOG_IEEE", flog2
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
@@ -1333,75 +1052,68 @@ class ASHR_Common <bits<11> inst> : R600_2OP_Helper <inst, "ASHR", sra>;
class MULHI_INT_Common <bits<11> inst> : R600_2OP_Helper <
inst, "MULHI_INT", mulhs
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class MULHI_UINT_Common <bits<11> inst> : R600_2OP_Helper <
inst, "MULHI", mulhu
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class MULLO_INT_Common <bits<11> inst> : R600_2OP_Helper <
inst, "MULLO_INT", mul
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class MULLO_UINT_Common <bits<11> inst> : R600_2OP <inst, "MULLO_UINT", []> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIP_CLAMPED_Common <bits<11> inst> : R600_1OP <
inst, "RECIP_CLAMPED", []
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIP_IEEE_Common <bits<11> inst> : R600_1OP <
inst, "RECIP_IEEE", [(set f32:$dst, (fdiv FP_ONE, f32:$src0))]
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIP_UINT_Common <bits<11> inst> : R600_1OP_Helper <
inst, "RECIP_UINT", AMDGPUurecip
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIPSQRT_CLAMPED_Common <bits<11> inst> : R600_1OP_Helper <
inst, "RECIPSQRT_CLAMPED", int_AMDGPU_rsq
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP <
inst, "RECIPSQRT_IEEE", []
> {
- let TransOnly = 1;
let Itinerary = TransALU;
}
class SIN_Common <bits<11> inst> : R600_1OP <
- inst, "SIN", []>{
+ inst, "SIN", [(set f32:$dst, (SIN_HW f32:$src0))]>{
let Trig = 1;
- let TransOnly = 1;
let Itinerary = TransALU;
}
class COS_Common <bits<11> inst> : R600_1OP <
- inst, "COS", []> {
+ inst, "COS", [(set f32:$dst, (COS_HW f32:$src0))]> {
let Trig = 1;
- let TransOnly = 1;
let Itinerary = TransALU;
}
+def CLAMP_R600 : CLAMP <R600_Reg32>;
+def FABS_R600 : FABS<R600_Reg32>;
+def FNEG_R600 : FNEG<R600_Reg32>;
+
//===----------------------------------------------------------------------===//
// Helper patterns for complex intrinsics
//===----------------------------------------------------------------------===//
@@ -1424,6 +1136,13 @@ class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ie
(exp_ieee (mul_lit (log_clamped (MAX $src_y, (f32 ZERO))), $src_w, $src_x))
>;
+// FROUND pattern
+class FROUNDPat<Instruction CNDGE> : Pat <
+ (AMDGPUround f32:$x),
+ (CNDGE (ADD (FNEG_R600 (f32 HALF)), (FRACT $x)), (CEIL $x), (FLOOR $x))
+>;
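The pattern above builds round-to-nearest out of FRACT, CEIL and FLOOR, with CNDGE selecting on the sign of frac(x) - 0.5. A C++ sketch of the computation the pattern encodes (a reference only; CNDGE(c, a, b) == c >= 0 ? a : b, per the CNDGE_Common pattern earlier):

    #include <cmath>
    float round_r600(float x) {
      float Frac = x - std::floor(x);   // FRACT: always in [0, 1)
      return (Frac - 0.5f >= 0.0f)      // CNDGE condition
                 ? std::ceil(x)         // second operand
                 : std::floor(x);       // third operand
    }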
+
+
//===----------------------------------------------------------------------===//
// R600 / R700 Instructions
//===----------------------------------------------------------------------===//
@@ -1436,7 +1155,7 @@ let Predicates = [isR600] in {
def CNDE_r600 : CNDE_Common<0x18>;
def CNDGT_r600 : CNDGT_Common<0x19>;
def CNDGE_r600 : CNDGE_Common<0x1A>;
- defm DOT4_r600 : DOT4_Common<0x50>;
+ def DOT4_r600 : DOT4_Common<0x50>;
defm CUBE_r600 : CUBE_Common<0x52>;
def EXP_IEEE_r600 : EXP_IEEE_Common<0x61>;
def LOG_CLAMPED_r600 : LOG_CLAMPED_Common<0x62>;
@@ -1465,11 +1184,12 @@ let Predicates = [isR600] in {
def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_r600 $src))>;
+ def : FROUNDPat <CNDGE_r600>;
def R600_ExportSwz : ExportSwzInst {
let Word1{20-17} = 0; // BURST_COUNT
let Word1{21} = eop;
- let Word1{22} = 1; // VALID_PIXEL_MODE
+ let Word1{22} = 0; // VALID_PIXEL_MODE
let Word1{30-23} = inst;
let Word1{31} = 1; // BARRIER
}
@@ -1478,58 +1198,58 @@ let Predicates = [isR600] in {
def R600_ExportBuf : ExportBufInst {
let Word1{20-17} = 0; // BURST_COUNT
let Word1{21} = eop;
- let Word1{22} = 1; // VALID_PIXEL_MODE
+ let Word1{22} = 0; // VALID_PIXEL_MODE
let Word1{30-23} = inst;
let Word1{31} = 1; // BARRIER
}
defm : SteamOutputExportPattern<R600_ExportBuf, 0x20, 0x21, 0x22, 0x23>;
- def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$COUNT),
- "TEX $COUNT @$ADDR"> {
+ def CF_TC_R600 : CF_CLAUSE_R600<1, (ins i32imm:$ADDR, i32imm:$CNT),
+ "TEX $CNT @$ADDR"> {
let POP_COUNT = 0;
}
- def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$COUNT),
- "VTX $COUNT @$ADDR"> {
+ def CF_VC_R600 : CF_CLAUSE_R600<2, (ins i32imm:$ADDR, i32imm:$CNT),
+ "VTX $CNT @$ADDR"> {
let POP_COUNT = 0;
}
def WHILE_LOOP_R600 : CF_CLAUSE_R600<6, (ins i32imm:$ADDR),
"LOOP_START_DX10 @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def END_LOOP_R600 : CF_CLAUSE_R600<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def LOOP_BREAK_R600 : CF_CLAUSE_R600<9, (ins i32imm:$ADDR),
"LOOP_BREAK @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def CF_CONTINUE_R600 : CF_CLAUSE_R600<8, (ins i32imm:$ADDR),
"CONTINUE @$ADDR"> {
let POP_COUNT = 0;
- let COUNT = 0;
+ let CNT = 0;
}
def CF_JUMP_R600 : CF_CLAUSE_R600<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"JUMP @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
+ let CNT = 0;
}
def CF_ELSE_R600 : CF_CLAUSE_R600<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"ELSE @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
+ let CNT = 0;
}
def CF_CALL_FS_R600 : CF_CLAUSE_R600<19, (ins), "CALL_FS"> {
let ADDR = 0;
- let COUNT = 0;
+ let CNT = 0;
let POP_COUNT = 0;
}
def POP_R600 : CF_CLAUSE_R600<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
"POP @$ADDR POP:$POP_COUNT"> {
- let COUNT = 0;
+ let CNT = 0;
}
def CF_END_R600 : CF_CLAUSE_R600<0, (ins), "CF_END"> {
- let COUNT = 0;
+ let CNT = 0;
let POP_COUNT = 0;
let ADDR = 0;
let END_OF_PROGRAM = 1;
@@ -1537,18 +1257,6 @@ let Predicates = [isR600] in {
}
-// Helper pattern for normalizing inputs to triginomic instructions for R700+
-// cards.
-class COS_PAT <InstR600 trig> : Pat<
- (fcos f32:$src),
- (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src))
->;
-
-class SIN_PAT <InstR600 trig> : Pat<
- (fsin f32:$src),
- (trig (MUL_IEEE (MOV_IMM_I32 CONST.TWO_PI_INV), $src))
->;
-
//===----------------------------------------------------------------------===//
// R700 Only instructions
//===----------------------------------------------------------------------===//
@@ -1556,12 +1264,35 @@ class SIN_PAT <InstR600 trig> : Pat<
let Predicates = [isR700] in {
def SIN_r700 : SIN_Common<0x6E>;
def COS_r700 : COS_Common<0x6F>;
+}
+
+//===----------------------------------------------------------------------===//
+// Evergreen / Cayman store instructions
+//===----------------------------------------------------------------------===//
- // R700 normalizes inputs to SIN/COS the same as EG
- def : SIN_PAT <SIN_r700>;
- def : COS_PAT <COS_r700>;
+let Predicates = [isEGorCayman] in {
+
+class CF_MEM_RAT_CACHELESS <bits<6> rat_inst, bits<4> rat_id, bits<4> mask, dag ins,
+ string name, list<dag> pattern>
+ : EG_CF_RAT <0x57, rat_inst, rat_id, mask, (outs), ins,
+ "MEM_RAT_CACHELESS "#name, pattern>;
+
+class CF_MEM_RAT <bits<6> rat_inst, bits<4> rat_id, dag ins, string name,
+ list<dag> pattern>
+ : EG_CF_RAT <0x56, rat_inst, rat_id, 0xf /* mask */, (outs), ins,
+ "MEM_RAT "#name, pattern>;
+
+def RAT_MSKOR : CF_MEM_RAT <0x11, 0,
+ (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr),
+ "MSKOR $rw_gpr.XW, $index_gpr",
+ [(mskor_global v4i32:$rw_gpr, i32:$index_gpr)]
+> {
+ let eop = 0;
}
+} // End Predicates = [isEGorCayman]
+
+
//===----------------------------------------------------------------------===//
// Evergreen Only instructions
//===----------------------------------------------------------------------===//
@@ -1585,9 +1316,179 @@ def SIN_eg : SIN_Common<0x8D>;
def COS_eg : COS_Common<0x8E>;
def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>;
-def : SIN_PAT <SIN_eg>;
-def : COS_PAT <COS_eg>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>;
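This lowers sqrt(x) as x * rsq(x); a one-line sketch of the identity being relied on (assuming RECIPSQRT_CLAMPED behaves as 1/sqrt(x) for positive, normal inputs):

    #include <cmath>
    // sqrt(x) == x * (1 / sqrt(x)) for x > 0
    float sqrt_r600(float x) { return x * (1.0f / std::sqrt(x)); }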
+
+//===----------------------------------------------------------------------===//
+// Memory read/write instructions
+//===----------------------------------------------------------------------===//
+
+let usesCustomInserter = 1 in {
+
+// 32-bit store
+def RAT_WRITE_CACHELESS_32_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x1,
+ (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr, $index_gpr, $eop",
+ [(global_store i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+// 64-bit store
+def RAT_WRITE_CACHELESS_64_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x3,
+ (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr.XY, $index_gpr, $eop",
+ [(global_store v2i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+// 128-bit store
+def RAT_WRITE_CACHELESS_128_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0xf,
+ (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
+ "STORE_RAW $rw_gpr.XYZW, $index_gpr, $eop",
+ [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
+>;
+
+} // End usesCustomInserter = 1
+
+class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : VTX_WORD0_eg, VTX_READ<name, buffer_id, outs, pattern> {
+
+ // Static fields
+ let VC_INST = 0;
+ let FETCH_TYPE = 2;
+ let FETCH_WHOLE_QUAD = 0;
+ let BUFFER_ID = buffer_id;
+ let SRC_REL = 0;
+ // XXX: We can infer this field based on the SRC_GPR. This would allow us
+ // to store vertex addresses in any channel, not just X.
+ let SRC_SEL_X = 0;
+
+ let Inst{31-0} = Word0;
+}
+
+class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 1;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 1; // FMT_8
+}
+
+class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+ let MEGA_FETCH_COUNT = 2;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 5; // FMT_16
+
+}
+
+class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 4;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 7; // Masked
+ let DST_SEL_Z = 7; // Masked
+ let DST_SEL_W = 7; // Masked
+ let DATA_FORMAT = 0xD; // COLOR_32
+
+ // This is not really necessary, but there were some GPU hangs that appeared
+ // to be caused by ALU instructions in the next instruction group that wrote
+ // to the $src_gpr registers of the VTX_READ.
+ // e.g.
+ // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
+ // %T2_X<def> = MOV %ZERO
+ // Adding this constraint prevents this from happening.
+ let Constraints = "$src_gpr.ptr = $dst_gpr";
+}
+
+class VTX_READ_64_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_64 $dst_gpr.XY, $src_gpr", buffer_id,
+ (outs R600_Reg64:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 8;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 7;
+ let DST_SEL_W = 7;
+ let DATA_FORMAT = 0x1D; // COLOR_32_32
+}
+
+class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_eg <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
+ (outs R600_Reg128:$dst_gpr), pattern> {
+
+ let MEGA_FETCH_COUNT = 16;
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 2;
+ let DST_SEL_W = 3;
+ let DATA_FORMAT = 0x22; // COLOR_32_32_32_32
+
+ // XXX: Need to force VTX_READ_128 instructions to write to the same register
+ // that holds its buffer address to avoid potential hangs. We can't use
+ // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
+ // registers are different sizes.
+}
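Across these VTX_READ variants, MEGA_FETCH_COUNT is simply the fetch width in bytes (1/2/4/8/16 for the 8/16/32/64/128-bit reads), and a DST_SEL_* value of 7 masks that channel off. A sketch of the relationship (helper name illustrative):

    // MEGA_FETCH_COUNT for an N-bit VTX_READ is its width in bytes.
    unsigned megaFetchCount(unsigned BitWidth) {
      return BitWidth / 8; // 8 -> 1, 16 -> 2, 32 -> 4, 64 -> 8, 128 -> 16
    }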
+
+//===----------------------------------------------------------------------===//
+// VTX Read from parameter memory space
+//===----------------------------------------------------------------------===//
+
+def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
+ [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
+ [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
+ [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0,
+ [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
+ [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
+>;
+
+//===----------------------------------------------------------------------===//
+// VTX Read from global memory space
+//===----------------------------------------------------------------------===//
+
+// 8-bit reads
+def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
+ [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
+>;
+
+def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
+ [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+>;
+
+// 32-bit reads
+def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
+ [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 64-bit reads
+def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1,
+ [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 128-bit reads
+def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
+ [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+>;
+
} // End Predicates = [isEG]
//===----------------------------------------------------------------------===//
@@ -1620,10 +1521,11 @@ let Predicates = [isEGorCayman] in {
def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>;
defm : BFIPatterns <BFI_INT_eg>;
- def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT",
- [(set i32:$dst, (AMDGPUbitalign i32:$src0, i32:$src1, i32:$src2))],
- VecALU
+ def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
+ [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))], VecALU
>;
+ def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
+ def : ROTRPattern <BIT_ALIGN_INT_eg>;
def MULADD_eg : MULADD_Common<0x14>;
def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;
@@ -1635,7 +1537,10 @@ let Predicates = [isEGorCayman] in {
def CNDGE_eg : CNDGE_Common<0x1B>;
def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
- defm DOT4_eg : DOT4_Common<0xBE>;
+ def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
+ [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU
+ >;
+ def DOT4_eg : DOT4_Common<0xBE>;
defm CUBE_eg : CUBE_Common<0xC0>;
let hasSideEffects = 1 in {
@@ -1646,6 +1551,7 @@ let hasSideEffects = 1 in {
def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
let Pattern = [];
+ let Itinerary = AnyALU;
}
def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
@@ -1656,6 +1562,165 @@ let hasSideEffects = 1 in {
def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
+def GROUP_BARRIER : InstR600 <
+ (outs), (ins), " GROUP_BARRIER", [(int_AMDGPU_barrier_local)], AnyALU>,
+ R600ALU_Word0,
+ R600ALU_Word1_OP2 <0x54> {
+
+ let dst = 0;
+ let dst_rel = 0;
+ let src0 = 0;
+ let src0_rel = 0;
+ let src0_neg = 0;
+ let src0_abs = 0;
+ let src1 = 0;
+ let src1_rel = 0;
+ let src1_neg = 0;
+ let src1_abs = 0;
+ let write = 0;
+ let omod = 0;
+ let clamp = 0;
+ let last = 1;
+ let bank_swizzle = 0;
+ let pred_sel = 0;
+ let update_exec_mask = 0;
+ let update_pred = 0;
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+
+ let ALUInst = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// LDS Instructions
+//===----------------------------------------------------------------------===//
+class R600_LDS <bits<6> op, dag outs, dag ins, string asm,
+ list<dag> pattern = []> :
+
+ InstR600 <outs, ins, asm, pattern, XALU>,
+ R600_ALU_LDS_Word0,
+ R600LDS_Word1 {
+
+ bits<6> offset = 0;
+ let lds_op = op;
+
+ let Word1{27} = offset{0};
+ let Word1{12} = offset{1};
+ let Word1{28} = offset{2};
+ let Word1{31} = offset{3};
+ let Word0{12} = offset{4};
+ let Word0{25} = offset{5};
+
+
+ let Inst{31-0} = Word0;
+ let Inst{63-32} = Word1;
+
+ let ALUInst = 1;
+ let HasNativeOperands = 1;
+ let UseNamedOperandTable = 1;
+}
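Note that the 6-bit LDS offset is not contiguous in the encoding; the lets above scatter it across both instruction words. The same packing as a C++ sketch (bit positions copied from the class; the function itself is illustrative only):

    #include <cstdint>
    // Scatter the 6-bit LDS offset into Word0/Word1 per the lets above.
    void packLDSOffset(uint32_t &Word0, uint32_t &Word1, unsigned Offset) {
      Word1 |= ((Offset >> 0) & 1u) << 27;
      Word1 |= ((Offset >> 1) & 1u) << 12;
      Word1 |= ((Offset >> 2) & 1u) << 28;
      Word1 |= ((Offset >> 3) & 1u) << 31;
      Word0 |= ((Offset >> 4) & 1u) << 12;
      Word0 |= ((Offset >> 5) & 1u) << 25;
    }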
+
+class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS <
+ lds_op,
+ (outs R600_Reg32:$dst),
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ LAST:$last, R600_Pred:$pred_sel,
+ BANK_SWIZZLE:$bank_swizzle),
+ " "#name#" $last OQAP, $src0$src0_rel $pred_sel",
+ pattern
+ > {
+
+ let src1 = 0;
+ let src1_rel = 0;
+ let src2 = 0;
+ let src2_rel = 0;
+
+ let Defs = [OQAP];
+ let usesCustomInserter = 1;
+ let LDS_1A = 1;
+ let DisableEncoding = "$dst";
+}
+
+class R600_LDS_1A1D <bits<6> lds_op, dag outs, string name, list<dag> pattern,
+ string dst =""> :
+ R600_LDS <
+ lds_op, outs,
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+ LAST:$last, R600_Pred:$pred_sel,
+ BANK_SWIZZLE:$bank_swizzle),
+ " "#name#" $last "#dst#"$src0$src0_rel, $src1$src1_rel, $pred_sel",
+ pattern
+ > {
+
+ field string BaseOp;
+
+ let src2 = 0;
+ let src2_rel = 0;
+ let LDS_1A1D = 1;
+}
+
+class R600_LDS_1A1D_NORET <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS_1A1D <lds_op, (outs), name, pattern> {
+ let BaseOp = name;
+}
+
+class R600_LDS_1A1D_RET <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS_1A1D <lds_op, (outs R600_Reg32:$dst), name##"_RET", pattern, "OQAP, "> {
+
+ let BaseOp = name;
+ let usesCustomInserter = 1;
+ let DisableEncoding = "$dst";
+ let Defs = [OQAP];
+}
+
+class R600_LDS_1A2D <bits<6> lds_op, string name, list<dag> pattern> :
+ R600_LDS <
+ lds_op,
+ (outs),
+ (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
+ R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
+ R600_Reg32:$src2, REL:$src2_rel, SEL:$src2_sel,
+ LAST:$last, R600_Pred:$pred_sel, BANK_SWIZZLE:$bank_swizzle),
+ " "#name# "$last $src0$src0_rel, $src1$src1_rel, $src2$src2_rel, $pred_sel",
+ pattern> {
+ let LDS_1A2D = 1;
+}
+
+def LDS_ADD : R600_LDS_1A1D_NORET <0x0, "LDS_ADD", [] >;
+def LDS_SUB : R600_LDS_1A1D_NORET <0x1, "LDS_SUB", [] >;
+def LDS_WRITE : R600_LDS_1A1D_NORET <0xD, "LDS_WRITE",
+ [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)]
+>;
+def LDS_BYTE_WRITE : R600_LDS_1A1D_NORET<0x12, "LDS_BYTE_WRITE",
+ [(truncstorei8_local i32:$src1, i32:$src0)]
+>;
+def LDS_SHORT_WRITE : R600_LDS_1A1D_NORET<0x13, "LDS_SHORT_WRITE",
+ [(truncstorei16_local i32:$src1, i32:$src0)]
+>;
+def LDS_ADD_RET : R600_LDS_1A1D_RET <0x20, "LDS_ADD",
+ [(set i32:$dst, (atomic_load_add_local i32:$src0, i32:$src1))]
+>;
+def LDS_SUB_RET : R600_LDS_1A1D_RET <0x21, "LDS_SUB",
+ [(set i32:$dst, (atomic_load_sub_local i32:$src0, i32:$src1))]
+>;
+def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET",
+ [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))]
+>;
+def LDS_BYTE_READ_RET : R600_LDS_1A <0x36, "LDS_BYTE_READ_RET",
+ [(set i32:$dst, (sextloadi8_local i32:$src0))]
+>;
+def LDS_UBYTE_READ_RET : R600_LDS_1A <0x37, "LDS_UBYTE_READ_RET",
+ [(set i32:$dst, (az_extloadi8_local i32:$src0))]
+>;
+def LDS_SHORT_READ_RET : R600_LDS_1A <0x38, "LDS_SHORT_READ_RET",
+ [(set i32:$dst, (sextloadi16_local i32:$src0))]
+>;
+def LDS_USHORT_READ_RET : R600_LDS_1A <0x39, "LDS_USHORT_READ_RET",
+ [(set i32:$dst, (az_extloadi16_local i32:$src0))]
+>;
+
// TRUNC is used for the FLT_TO_INT instructions to work around a
// perceived problem where the rounding modes are applied differently
// depending on the instruction and the slot they are in.
@@ -1673,9 +1738,11 @@ let hasSideEffects = 1 in {
// SHA-256 Patterns
def : SHA256MaPattern <BFI_INT_eg, XOR_INT>;
+ def : FROUNDPat <CNDGE_eg>;
+
def EG_ExportSwz : ExportSwzInst {
let Word1{19-16} = 0; // BURST_COUNT
- let Word1{20} = 1; // VALID_PIXEL_MODE
+ let Word1{20} = 0; // VALID_PIXEL_MODE
let Word1{21} = eop;
let Word1{29-22} = inst;
let Word1{30} = 0; // MARK
@@ -1685,7 +1752,7 @@ let hasSideEffects = 1 in {
def EG_ExportBuf : ExportBufInst {
let Word1{19-16} = 0; // BURST_COUNT
- let Word1{20} = 1; // VALID_PIXEL_MODE
+ let Word1{20} = 0; // VALID_PIXEL_MODE
let Word1{21} = eop;
let Word1{29-22} = inst;
let Word1{30} = 0; // MARK
@@ -1744,48 +1811,78 @@ let hasSideEffects = 1 in {
let END_OF_PROGRAM = 1;
}
+} // End Predicates = [isEGorCayman]
+
//===----------------------------------------------------------------------===//
-// Memory read/write instructions
+// Register loads and stores - for indirect addressing
//===----------------------------------------------------------------------===//
-let usesCustomInserter = 1 in {
-class RAT_WRITE_CACHELESS_eg <dag ins, bits<4> comp_mask, string name,
- list<dag> pattern>
- : EG_CF_RAT <0x57, 0x2, 0, (outs), ins,
- !strconcat(name, " $rw_gpr, $index_gpr, $eop"), pattern> {
- let RIM = 0;
- // XXX: Have a separate instruction for non-indexed writes.
- let TYPE = 1;
- let RW_REL = 0;
- let ELEM_SIZE = 0;
+defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
- let ARRAY_SIZE = 0;
- let COMP_MASK = comp_mask;
- let BURST_COUNT = 0;
- let VPM = 0;
- let MARK = 0;
- let BARRIER = 1;
-}
+//===----------------------------------------------------------------------===//
+// Cayman Instructions
+//===----------------------------------------------------------------------===//
-} // End usesCustomInserter = 1
+let Predicates = [isCayman] in {
-// 32-bit store
-def RAT_WRITE_CACHELESS_32_eg : RAT_WRITE_CACHELESS_eg <
- (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- 0x1, "RAT_WRITE_CACHELESS_32_eg",
- [(global_store i32:$rw_gpr, i32:$index_gpr)]
+def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24",
+ [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))], VecALU
+>;
+def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
+ [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
>;
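Both 24-bit multiply patterns rely on the I24/U24 fragments to guarantee the operands fit in 24 bits; the hardware ignores the top byte. A sketch of the assumed semantics:

    #include <cstdint>
    // Unsigned 24-bit multiply: only the low 24 bits of each operand matter.
    uint32_t mul_u24(uint32_t a, uint32_t b) {
      return (a & 0xffffffu) * (b & 0xffffffu);
    }
    // Signed variant: sign-extend each operand from bit 23 first.
    int32_t mul_i24(int32_t a, int32_t b) {
      int32_t sa = (int32_t)((uint32_t)a << 8) >> 8;
      int32_t sb = (int32_t)((uint32_t)b << 8) >> 8;
      return sa * sb;
    }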
-//128-bit store
-def RAT_WRITE_CACHELESS_128_eg : RAT_WRITE_CACHELESS_eg <
- (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
- 0xf, "RAT_WRITE_CACHELESS_128",
- [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
+let isVector = 1 in {
+
+def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
+
+def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
+def MULHI_INT_cm : MULHI_INT_Common<0x90>;
+def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
+def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
+def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
+def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
+def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
+def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
+def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
+def SIN_cm : SIN_Common<0x8D>;
+def COS_cm : COS_Common<0x8E>;
+} // End isVector = 1
+
+def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
+
+defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
+
+// RECIP_UINT emulation for Cayman
+// The multiplication scales from [0,1] to the unsigned integer range
+def : Pat <
+ (AMDGPUurecip i32:$src0),
+ (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
+ (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
>;
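A sketch of the emulation selected above: the float reciprocal lies in [0, 1], and multiplying by FP_UINT_MAX_PLUS_1 (2^32 as a float) rescales it to the unsigned range before FLT_TO_UINT truncates. Illustrative only; the surrounding division lowering handles the edge cases:

    #include <cstdint>
    uint32_t urecip(uint32_t x) {            // assumes x > 1
      float R = 1.0f / (float)x;             // RECIP_IEEE
      return (uint32_t)(R * 4294967296.0f);  // MUL_IEEE by 2^32, FLT_TO_UINT
    }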
-class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
- : InstR600ISA <outs, (ins MEMxi:$ptr), name#" $dst, $ptr", pattern>,
- VTX_WORD1_GPR, VTX_WORD0 {
+ def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
+ let ADDR = 0;
+ let POP_COUNT = 0;
+ let COUNT = 0;
+ }
+
+def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
+
+class RAT_STORE_DWORD <RegisterClass rc, ValueType vt, bits<4> mask> :
+ CF_MEM_RAT_CACHELESS <0x14, 0, mask,
+ (ins rc:$rw_gpr, R600_TReg32_X:$index_gpr),
+ "STORE_DWORD $rw_gpr, $index_gpr",
+ [(global_store vt:$rw_gpr, i32:$index_gpr)]> {
+ let eop = 0; // This bit is not used on Cayman.
+}
+
+def RAT_STORE_DWORD32 : RAT_STORE_DWORD <R600_TReg32_X, i32, 0x1>;
+def RAT_STORE_DWORD64 : RAT_STORE_DWORD <R600_Reg64, v2i32, 0x3>;
+def RAT_STORE_DWORD128 : RAT_STORE_DWORD <R600_Reg128, v4i32, 0xf>;
+
+class VTX_READ_cm <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
+ : VTX_WORD0_cm, VTX_READ<name, buffer_id, outs, pattern> {
// Static fields
let VC_INST = 0;
@@ -1796,53 +1893,18 @@ class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
// XXX: We can infer this field based on the SRC_GPR. This would allow us
// to store vertex addresses in any channel, not just X.
let SRC_SEL_X = 0;
- let DST_REL = 0;
- // The docs say that if this bit is set, then DATA_FORMAT, NUM_FORMAT_ALL,
- // FORMAT_COMP_ALL, SRF_MODE_ALL, and ENDIAN_SWAP fields will be ignored,
- // however, based on my testing if USE_CONST_FIELDS is set, then all
- // these fields need to be set to 0.
- let USE_CONST_FIELDS = 0;
- let NUM_FORMAT_ALL = 1;
- let FORMAT_COMP_ALL = 0;
- let SRF_MODE_ALL = 0;
+ let SRC_SEL_Y = 0;
+ let STRUCTURED_READ = 0;
+ let LDS_REQ = 0;
+ let COALESCED_READ = 0;
let Inst{31-0} = Word0;
- let Inst{63-32} = Word1;
- // LLVM can only encode 64-bit instructions, so these fields are manually
- // encoded in R600CodeEmitter
- //
- // bits<16> OFFSET;
- // bits<2> ENDIAN_SWAP = 0;
- // bits<1> CONST_BUF_NO_STRIDE = 0;
- // bits<1> MEGA_FETCH = 0;
- // bits<1> ALT_CONST = 0;
- // bits<2> BUFFER_INDEX_MODE = 0;
-
-
-
- // VTX_WORD2 (LLVM can only encode 64-bit instructions, so WORD2 encoding
- // is done in R600CodeEmitter
- //
- // Inst{79-64} = OFFSET;
- // Inst{81-80} = ENDIAN_SWAP;
- // Inst{82} = CONST_BUF_NO_STRIDE;
- // Inst{83} = MEGA_FETCH;
- // Inst{84} = ALT_CONST;
- // Inst{86-85} = BUFFER_INDEX_MODE;
- // Inst{95-86} = 0; Reserved
-
- // VTX_WORD3 (Padding)
- //
- // Inst{127-96} = 0;
-
- let VTXInst = 1;
}
-class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_8", buffer_id, (outs R600_TReg32_X:$dst),
- pattern> {
+class VTX_READ_8_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 1;
let DST_SEL_X = 0;
let DST_SEL_Y = 7; // Masked
let DST_SEL_Z = 7; // Masked
@@ -1850,10 +1912,9 @@ class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
let DATA_FORMAT = 1; // FMT_8
}
-class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_16", buffer_id, (outs R600_TReg32_X:$dst),
- pattern> {
- let MEGA_FETCH_COUNT = 2;
+class VTX_READ_16_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
let DST_SEL_X = 0;
let DST_SEL_Y = 7; // Masked
let DST_SEL_Z = 7; // Masked
@@ -1862,11 +1923,10 @@ class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
}
-class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_32", buffer_id, (outs R600_TReg32_X:$dst),
- pattern> {
+class VTX_READ_32_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_TReg32_X:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 4;
let DST_SEL_X = 0;
let DST_SEL_Y = 7; // Masked
let DST_SEL_Z = 7; // Masked
@@ -1875,19 +1935,29 @@ class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
// This is not really necessary, but there were some GPU hangs that appeared
// to be caused by ALU instructions in the next instruction group that wrote
- // to the $ptr registers of the VTX_READ.
+ // to the $src_gpr registers of the VTX_READ.
// e.g.
// %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
// %T2_X<def> = MOV %ZERO
//Adding this constraint prevents this from happening.
- let Constraints = "$ptr.ptr = $dst";
+ let Constraints = "$src_gpr.ptr = $dst_gpr";
}
-class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
- : VTX_READ_eg <"VTX_READ_128", buffer_id, (outs R600_Reg128:$dst),
- pattern> {
+class VTX_READ_64_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_64 $dst_gpr, $src_gpr", buffer_id,
+ (outs R600_Reg64:$dst_gpr), pattern> {
+
+ let DST_SEL_X = 0;
+ let DST_SEL_Y = 1;
+ let DST_SEL_Z = 7;
+ let DST_SEL_W = 7;
+ let DATA_FORMAT = 0x1D; // COLOR_32_32
+}
+
+class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern>
+ : VTX_READ_cm <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
+ (outs R600_Reg128:$dst_gpr), pattern> {
- let MEGA_FETCH_COUNT = 16;
let DST_SEL_X = 0;
let DST_SEL_Y = 1;
let DST_SEL_Z = 2;
@@ -1896,28 +1966,31 @@ class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
// XXX: Need to force VTX_READ_128 instructions to write to the same register
// that holds its buffer address to avoid potential hangs. We can't use
- // the same constraint as VTX_READ_32_eg, because the $ptr.ptr and $dst
+ // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
// registers are different sizes.
}
//===----------------------------------------------------------------------===//
// VTX Read from parameter memory space
//===----------------------------------------------------------------------===//
+def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0,
+ [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
+>;
-def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
- [(set i32:$dst, (load_param_zexti8 ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0,
+ [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
- [(set i32:$dst, (load_param_zexti16 ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0,
+ [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
- [(set i32:$dst, (load_param ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_64_cm : VTX_READ_64_cm <0,
+ [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
- [(set v4i32:$dst, (load_param ADDRVTX_READ:$ptr))]
+def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
+ [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
//===----------------------------------------------------------------------===//
@@ -1925,78 +1998,29 @@ def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
//===----------------------------------------------------------------------===//
// 8-bit reads
-def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
- [(set i32:$dst, (zextloadi8_global ADDRVTX_READ:$ptr))]
+def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
+ [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
>;
-// 32-bit reads
-def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
- [(set i32:$dst, (global_load ADDRVTX_READ:$ptr))]
+def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
+ [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
>;
-// 128-bit reads
-def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
- [(set v4i32:$dst, (global_load ADDRVTX_READ:$ptr))]
+// 32-bit reads
+def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
+ [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
>;
-//===----------------------------------------------------------------------===//
-// Constant Loads
-// XXX: We are currently storing all constants in the global address space.
-//===----------------------------------------------------------------------===//
-
-def CONSTANT_LOAD_eg : VTX_READ_32_eg <1,
- [(set i32:$dst, (constant_load ADDRVTX_READ:$ptr))]
+// 64-bit reads
+def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1,
+ [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
>;
-}
-
-//===----------------------------------------------------------------------===//
-// Regist loads and stores - for indirect addressing
-//===----------------------------------------------------------------------===//
-
-defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
-
-let Predicates = [isCayman] in {
-
-let isVector = 1 in {
-
-def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
-
-def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
-def MULHI_INT_cm : MULHI_INT_Common<0x90>;
-def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
-def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
-def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
-def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
-def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
-def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
-def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
-def SIN_cm : SIN_Common<0x8D>;
-def COS_cm : COS_Common<0x8E>;
-} // End isVector = 1
-
-def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
-def : SIN_PAT <SIN_cm>;
-def : COS_PAT <COS_cm>;
-
-defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
-
-// RECIP_UINT emulation for Cayman
-// The multiplication scales from [0,1] to the unsigned integer range
-def : Pat <
- (AMDGPUurecip i32:$src0),
- (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
- (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
+// 128-bit reads
+def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1,
+ [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
>;
- def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
- let ADDR = 0;
- let POP_COUNT = 0;
- let COUNT = 0;
- }
-
-def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
-
} // End isCayman
//===----------------------------------------------------------------------===//
@@ -2007,9 +2031,6 @@ def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
def IF_PREDICATE_SET : ILFormat<(outs), (ins GPRI32:$src),
"IF_PREDICATE_SET $src", []>;
-def PREDICATED_BREAK : ILFormat<(outs), (ins GPRI32:$src),
- "PREDICATED_BREAK $src", []>;
-
//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//
@@ -2083,10 +2104,6 @@ def TXD_SHADOW: InstR600 <
} // End isPseudo = 1
} // End usesCustomInserter = 1
-def CLAMP_R600 : CLAMP <R600_Reg32>;
-def FABS_R600 : FABS<R600_Reg32>;
-def FNEG_R600 : FNEG<R600_Reg32>;
-
//===---------------------------------------------------------------------===//
// Return instruction
//===---------------------------------------------------------------------===//
@@ -2117,7 +2134,7 @@ def CONST_COPY : Instruction {
def TEX_VTX_CONSTBUF :
InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "VTX_READ_eg $dst, $ptr",
[(set v4i32:$dst, (CONST_ADDRESS ADDRGA_VAR_OFFSET:$ptr, (i32 imm:$BUFFER_ID)))]>,
- VTX_WORD1_GPR, VTX_WORD0 {
+ VTX_WORD1_GPR, VTX_WORD0_eg {
let VC_INST = 0;
let FETCH_TYPE = 2;
@@ -2171,7 +2188,7 @@ def TEX_VTX_CONSTBUF :
def TEX_VTX_TEXBUF:
InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "TEX_VTX_EXPLICIT_READ $dst, $ptr",
[(set v4f32:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr, imm:$BUFFER_ID))]>,
-VTX_WORD1_GPR, VTX_WORD0 {
+VTX_WORD1_GPR, VTX_WORD0_eg {
let VC_INST = 0;
let FETCH_TYPE = 2;
@@ -2235,7 +2252,7 @@ let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
def BRANCH : ILFormat<(outs), (ins brtarget:$target),
"; Pseudo unconditional branch instruction",
[(br bb:$target)]>;
- defm BRANCH_COND : BranchConditional<IL_brcond>;
+ defm BRANCH_COND : BranchConditional<IL_brcond, R600_Reg32, R600_Reg32>;
}
//===---------------------------------------------------------------------===//
@@ -2306,7 +2323,7 @@ def : CND_INT_f32 <CNDGE_INT, SETGE>;
//CNDGE_INT extra pattern
def : Pat <
- (selectcc i32:$src0, -1, i32:$src1, i32:$src2, COND_GT),
+ (selectcc i32:$src0, -1, i32:$src1, i32:$src2, COND_SGT),
(CNDGE_INT $src0, $src1, $src2)
>;
@@ -2321,86 +2338,6 @@ def KIL : Pat <
(MASK_WRITE (KILLGT (f32 ZERO), $src0))
>;
-// SGT Reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_LT),
- (SGT $src1, $src0)
->;
-
-// SGE Reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, COND_LE),
- (SGE $src1, $src0)
->;
-
-// SETGT_DX10 reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, COND_LT),
- (SETGT_DX10 $src1, $src0)
->;
-
-// SETGE_DX10 reverse args
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, COND_LE),
- (SETGE_DX10 $src1, $src0)
->;
-
-// SETGT_INT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETLT),
- (SETGT_INT $src1, $src0)
->;
-
-// SETGE_INT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETLE),
- (SETGE_INT $src1, $src0)
->;
-
-// SETGT_UINT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETULT),
- (SETGT_UINT $src1, $src0)
->;
-
-// SETGE_UINT reverse args
-def : Pat <
- (selectcc i32:$src0, i32:$src1, -1, 0, SETULE),
- (SETGE_UINT $src1, $src0)
->;
-
-// The next two patterns are special cases for handling 'true if ordered' and
-// 'true if unordered' conditionals. The assumption here is that the behavior of
-// SETE and SNE conforms to the Direct3D 10 rules for floating point values
-// described here:
-// http://msdn.microsoft.com/en-us/library/windows/desktop/cc308050.aspx#alpha_32_bit
-// We assume that SETE returns false when one of the operands is NAN and
-// SNE returns true when on of the operands is NAN
-
-//SETE - 'true if ordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, SETO),
- (SETE $src0, $src1)
->;
-
-//SETE_DX10 - 'true if ordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, SETO),
- (SETE_DX10 $src0, $src1)
->;
-
-//SNE - 'true if unordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, FP_ONE, FP_ZERO, SETUO),
- (SNE $src0, $src1)
->;
-
-//SETNE_DX10 - 'true if ordered'
-def : Pat <
- (selectcc f32:$src0, f32:$src1, -1, 0, SETUO),
- (SETNE_DX10 $src0, $src1)
->;
-
def : Extract_Element <f32, v4f32, 0, sub0>;
def : Extract_Element <f32, v4f32, 1, sub1>;
def : Extract_Element <f32, v4f32, 2, sub2>;
@@ -2424,10 +2361,24 @@ def : Insert_Element <i32, v4i32, 3, sub3>;
def : Vector4_Build <v4f32, f32>;
def : Vector4_Build <v4i32, i32>;
+def : Extract_Element <f32, v2f32, 0, sub0>;
+def : Extract_Element <f32, v2f32, 1, sub1>;
+
+def : Insert_Element <f32, v2f32, 0, sub0>;
+def : Insert_Element <f32, v2f32, 1, sub1>;
+
+def : Extract_Element <i32, v2i32, 0, sub0>;
+def : Extract_Element <i32, v2i32, 1, sub1>;
+
+def : Insert_Element <i32, v2i32, 0, sub0>;
+def : Insert_Element <i32, v2i32, 1, sub1>;
+
// bitconvert patterns
def : BitConvert <i32, f32, R600_Reg32>;
def : BitConvert <f32, i32, R600_Reg32>;
+def : BitConvert <v2f32, v2i32, R600_Reg64>;
+def : BitConvert <v2i32, v2f32, R600_Reg64>;
def : BitConvert <v4f32, v4i32, R600_Reg128>;
def : BitConvert <v4i32, v4f32, R600_Reg128>;
@@ -2435,3 +2386,11 @@ def : BitConvert <v4i32, v4f32, R600_Reg128>;
def : DwordAddrPat <i32, R600_Reg32>;
} // End isR600toCayman Predicate
+
+def getLDSNoRetOp : InstrMapping {
+ let FilterClass = "R600_LDS_1A1D";
+ let RowFields = ["BaseOp"];
+ let ColFields = ["DisableEncoding"];
+ let KeyCol = ["$dst"];
+ let ValueCols = [[""""]];
+}
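InstrMapping defs like this make TableGen emit an opcode lookup table; the generated accessor (name and signature assumed from the usual TableGen output) lets a later pass swap the _RET form for the no-return form when the result is unused:

    // Hypothetical use in a C++ pass; -1 means no no-return form exists.
    int NoRetOpcode = AMDGPU::getLDSNoRetOp(MI->getOpcode());
    if (NoRetOpcode != -1)
      MI->setDesc(TII->get(NoRetOpcode));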
diff --git a/lib/Target/R600/R600Intrinsics.td b/lib/Target/R600/R600Intrinsics.td
index dc8980a..9681747 100644
--- a/lib/Target/R600/R600Intrinsics.td
+++ b/lib/Target/R600/R600Intrinsics.td
@@ -12,12 +12,56 @@
//===----------------------------------------------------------------------===//
let TargetPrefix = "R600", isTarget = 1 in {
+ class TextureIntrinsicFloatInput :
+ Intrinsic<[llvm_v4f32_ty], [
+ llvm_v4f32_ty, // Coord
+ llvm_i32_ty, // offset_x
+ llvm_i32_ty, // offset_y,
+ llvm_i32_ty, // offset_z,
+ llvm_i32_ty, // resource_id
+ llvm_i32_ty, // samplerid
+ llvm_i32_ty, // coord_type_x
+ llvm_i32_ty, // coord_type_y
+ llvm_i32_ty, // coord_type_z
+ llvm_i32_ty // coord_type_w
+ ], [IntrNoMem]>;
+ class TextureIntrinsicInt32Input :
+ Intrinsic<[llvm_v4i32_ty], [
+ llvm_v4i32_ty, // Coord
+ llvm_i32_ty, // offset_x
+ llvm_i32_ty, // offset_y,
+ llvm_i32_ty, // offset_z,
+ llvm_i32_ty, // resource_id
+ llvm_i32_ty, // samplerid
+ llvm_i32_ty, // coord_type_x
+ llvm_i32_ty, // coord_type_y
+ llvm_i32_ty, // coord_type_z
+ llvm_i32_ty // coord_type_w
+ ], [IntrNoMem]>;
+
def int_R600_load_input :
Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_R600_interp_input :
Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_R600_interp_const :
+ Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_R600_interp_xy :
+ Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+def int_R600_interp_zw :
+ Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_R600_load_texbuf :
Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_R600_tex : TextureIntrinsicFloatInput;
+ def int_R600_texc : TextureIntrinsicFloatInput;
+ def int_R600_txl : TextureIntrinsicFloatInput;
+ def int_R600_txlc : TextureIntrinsicFloatInput;
+ def int_R600_txb : TextureIntrinsicFloatInput;
+ def int_R600_txbc : TextureIntrinsicFloatInput;
+ def int_R600_txf : TextureIntrinsicInt32Input;
+ def int_R600_ldptr : TextureIntrinsicInt32Input;
+ def int_R600_txq : TextureIntrinsicInt32Input;
+ def int_R600_ddx : TextureIntrinsicFloatInput;
+ def int_R600_ddy : TextureIntrinsicFloatInput;
def int_R600_store_swizzle :
Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_R600_store_stream_output :
diff --git a/lib/Target/R600/R600MachineFunctionInfo.cpp b/lib/Target/R600/R600MachineFunctionInfo.cpp
index 018b403..01105c6 100644
--- a/lib/Target/R600/R600MachineFunctionInfo.cpp
+++ b/lib/Target/R600/R600MachineFunctionInfo.cpp
@@ -12,7 +12,9 @@
using namespace llvm;
-R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
- : AMDGPUMachineFunction(MF) { }
+// Pin the vtable to this file.
+void R600MachineFunctionInfo::anchor() {}
+R600MachineFunctionInfo::R600MachineFunctionInfo(const MachineFunction &MF)
+ : AMDGPUMachineFunction(MF) { }
diff --git a/lib/Target/R600/R600MachineFunctionInfo.h b/lib/Target/R600/R600MachineFunctionInfo.h
index 70fddbb..c1bec0a 100644
--- a/lib/Target/R600/R600MachineFunctionInfo.h
+++ b/lib/Target/R600/R600MachineFunctionInfo.h
@@ -13,14 +13,15 @@
#ifndef R600MACHINEFUNCTIONINFO_H
#define R600MACHINEFUNCTIONINFO_H
+#include "AMDGPUMachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "AMDGPUMachineFunction.h"
#include <vector>
namespace llvm {
class R600MachineFunctionInfo : public AMDGPUMachineFunction {
+ virtual void anchor();
public:
R600MachineFunctionInfo(const MachineFunction &MF);
SmallVector<unsigned, 4> LiveOuts;
diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/R600/R600MachineScheduler.cpp
index a777142..da2a4d8 100644
--- a/lib/Target/R600/R600MachineScheduler.cpp
+++ b/lib/Target/R600/R600MachineScheduler.cpp
@@ -9,19 +9,17 @@
//
/// \file
/// \brief R600 Machine Scheduler interface
-// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "misched"
#include "R600MachineScheduler.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
-#include <set>
using namespace llvm;
@@ -30,54 +28,80 @@ void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
DAG = dag;
TII = static_cast<const R600InstrInfo*>(DAG->TII);
TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
+ VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
MRI = &DAG->MRI;
- Available[IDAlu]->clear();
- Available[IDFetch]->clear();
- Available[IDOther]->clear();
CurInstKind = IDOther;
CurEmitted = 0;
- OccupedSlotsMask = 15;
+ OccupedSlotsMask = 31;
InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
-
+ InstKindLimit[IDOther] = 32;
const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD5XXX) {
- InstKindLimit[IDFetch] = 7; // 8 minus 1 for security
- } else {
- InstKindLimit[IDFetch] = 15; // 16 minus 1 for security
- }
+ InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
+ AluInstCount = 0;
+ FetchInstCount = 0;
}
-void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst)
+void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
+ std::vector<SUnit *> &QDst)
{
- if (QSrc->empty())
- return;
- for (ReadyQueue::iterator I = QSrc->begin(),
- E = QSrc->end(); I != E; ++I) {
- (*I)->NodeQueueId &= ~QSrc->getID();
- QDst->push(*I);
- }
- QSrc->clear();
+ QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
+ QSrc.clear();
+}
+
+static
+unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
+ assert (GPRCount && "GPRCount cannot be 0");
+ return 248 / GPRCount;
}
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
SUnit *SU = 0;
- IsTopNode = true;
NextInstKind = IDOther;
+ IsTopNode = false;
+
// check if we might want to switch current clause type
- bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
- (CurEmitted > InstKindLimit[CurInstKind]) ||
- (Available[CurInstKind]->empty());
- bool AllowSwitchFromAlu = (CurEmitted > InstKindLimit[CurInstKind]) &&
- (!Available[IDFetch]->empty() || !Available[IDOther]->empty());
-
- if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
- (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
+ bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
+ (Available[CurInstKind].empty());
+ bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
+ (!Available[IDFetch].empty() || !Available[IDOther].empty());
+
+ if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
+ // We use the heuristic provided by the AMD Accelerated Parallel Processing
+ // OpenCL Programming Guide:
+ // The approximate number of WFs that allows TEX instructions to hide ALU
+ // instructions is:
+ // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
+ float ALUFetchRatioEstimate =
+ (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
+ (FetchInstCount + Available[IDFetch].size());
+ unsigned NeededWF = 62.5f / ALUFetchRatioEstimate;
+ DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
+ // We assume the local GPR requirements to be "dominated" by the requirements
+ // of the TEX clause (which consumes 128-bit regs); ALU instructions before
+ // and after TEX are indeed likely to consume or generate values from/for the
+ // TEX clause.
+ // Available[IDFetch].size() * 2 : GPRs required in the fetch clause.
+ // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (needing
+ // one GPR) or TmXYZW = TnXYZW (needing 2 GPRs).
+ // (TODO: use RegisterPressure)
+ // If we are going to use too many GPRs, we flush fetch instructions to lower
+ // register pressure on 128-bit regs.
+ unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
+ if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
+ AllowSwitchFromAlu = true;
+ }
+
+ if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
+ (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
// try to pick ALU
SU = pickAlu();
+ if (!SU && !PhysicalRegCopy.empty()) {
+ SU = PhysicalRegCopy.front();
+ PhysicalRegCopy.erase(PhysicalRegCopy.begin());
+ }
if (SU) {
- if (CurEmitted > InstKindLimit[IDAlu])
+ if (CurEmitted >= InstKindLimit[IDAlu])
CurEmitted = 0;
NextInstKind = IDAlu;
}
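Condensed, the heuristic added above computes NeededWF = 500 / (ratio * 8) = 62.5 / ratio and compares it with the wavefront count the register file can sustain, 248 / GPRCount. A standalone sketch (parameter names illustrative; the real code reads the scheduler queues directly):

    // True when pending fetches should be flushed to relieve GPR pressure.
    // Assumes FetchInsts >= 1 and FetchQueueSize >= 1, as guarded above.
    static bool shouldFlushFetch(float AluInsts, float FetchInsts,
                                 unsigned FetchQueueSize) {
      float Ratio = AluInsts / FetchInsts;  // ALU:TEX instruction ratio
      float NeededWF = 62.5f / Ratio;       // 500 TEX cycles / (Ratio * 8)
      unsigned GPRs = 2 * FetchQueueSize;   // ~2 GPRs live per pending fetch
      return NeededWF > 248.0f / GPRs;      // exceeds GPR-limited WF count
    }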
@@ -99,14 +123,10 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
DEBUG(
if (SU) {
- dbgs() << "picked node: ";
+ dbgs() << " ** Pick node **\n";
SU->dump(DAG);
} else {
- dbgs() << "NO NODE ";
- for (int i = 0; i < IDLast; ++i) {
- Available[i]->dump();
- Pending[i]->dump();
- }
+ dbgs() << "NO NODE \n";
for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
const SUnit &S = DAG->SUnits[i];
if (!S.isScheduled)
@@ -119,19 +139,16 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
}
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
-
- DEBUG(dbgs() << "scheduled: ");
- DEBUG(SU->dump(DAG));
-
if (NextInstKind != CurInstKind) {
DEBUG(dbgs() << "Instruction Type Switch\n");
if (NextInstKind != IDAlu)
- OccupedSlotsMask = 15;
+ OccupedSlotsMask |= 31;
CurEmitted = 0;
CurInstKind = NextInstKind;
}
if (CurInstKind == IDAlu) {
+ AluInstCount ++;
switch (getAluKind(SU)) {
case AluT_XYZW:
CurEmitted += 4;
@@ -157,20 +174,37 @@ void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
if (CurInstKind != IDFetch) {
MoveUnits(Pending[IDFetch], Available[IDFetch]);
- }
- MoveUnits(Pending[IDOther], Available[IDOther]);
+ } else
+ FetchInstCount++;
}
-void R600SchedStrategy::releaseTopNode(SUnit *SU) {
- int IK = getInstKind(SU);
+static bool
+isPhysicalRegCopy(MachineInstr *MI) {
+ if (MI->getOpcode() != AMDGPU::COPY)
+ return false;
- DEBUG(dbgs() << IK << " <= ");
- DEBUG(SU->dump(DAG));
+ return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
+}
- Pending[IK]->push(SU);
+void R600SchedStrategy::releaseTopNode(SUnit *SU) {
+ DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
}
void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
+ DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
+ if (isPhysicalRegCopy(SU->getInstr())) {
+ PhysicalRegCopy.push_back(SU);
+ return;
+ }
+
+ int IK = getInstKind(SU);
+
+ // There is no export clause, so we can schedule one as soon as it's ready.
+ if (IK == IDOther)
+ Available[IDOther].push_back(SU);
+ else
+ Pending[IK].push_back(SU);
+
}
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
@@ -185,18 +219,19 @@ bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
MachineInstr *MI = SU->getInstr();
+ if (TII->isTransOnly(MI))
+ return AluTrans;
+
switch (MI->getOpcode()) {
+ case AMDGPU::PRED_X:
+ return AluPredX;
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
+ case AMDGPU::DOT_4:
return AluT_XYZW;
case AMDGPU::COPY:
- if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
- // %vregX = COPY Tn_X is likely to be discarded in favor of an
- // assignement of Tn_X to %vregX, don't considers it in scheduling
- return AluDiscarded;
- }
- else if (MI->getOperand(1).isUndef()) {
+ if (MI->getOperand(1).isUndef()) {
// MI will become a KILL; don't consider it in scheduling.
return AluDiscarded;
}
@@ -205,10 +240,18 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
}
// Does the instruction take a whole IG ?
+ // XXX: Is it possible to add a helper function in R600InstrInfo that can
+ // be used here and in R600PacketizerList::isSoloInstruction()?
if(TII->isVector(*MI) ||
TII->isCubeOp(MI->getOpcode()) ||
- TII->isReductionOp(MI->getOpcode()))
+ TII->isReductionOp(MI->getOpcode()) ||
+ MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
return AluT_XYZW;
+ }
+
+ if (TII->isLDSInstr(MI->getOpcode())) {
+ return AluT_X;
+ }
// Is the result already assigned to a channel ?
unsigned DestSubReg = MI->getOperand(0).getSubReg();
@@ -239,6 +282,10 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
return AluT_XYZW;
+ // LDS src registers cannot be used in the Trans slot.
+ if (TII->readsLDSSrcReg(MI))
+ return AluT_XYZW;
+
return AluAny;
}
@@ -246,57 +293,39 @@ R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
int R600SchedStrategy::getInstKind(SUnit* SU) {
int Opcode = SU->getInstr()->getOpcode();
+ if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
+ return IDFetch;
+
if (TII->isALUInstr(Opcode)) {
return IDAlu;
}
switch (Opcode) {
+ case AMDGPU::PRED_X:
case AMDGPU::COPY:
case AMDGPU::CONST_COPY:
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return IDAlu;
- case AMDGPU::TEX_VTX_CONSTBUF:
- case AMDGPU::TEX_VTX_TEXBUF:
- case AMDGPU::TEX_LD:
- case AMDGPU::TEX_GET_TEXTURE_RESINFO:
- case AMDGPU::TEX_GET_GRADIENTS_H:
- case AMDGPU::TEX_GET_GRADIENTS_V:
- case AMDGPU::TEX_SET_GRADIENTS_H:
- case AMDGPU::TEX_SET_GRADIENTS_V:
- case AMDGPU::TEX_SAMPLE:
- case AMDGPU::TEX_SAMPLE_C:
- case AMDGPU::TEX_SAMPLE_L:
- case AMDGPU::TEX_SAMPLE_C_L:
- case AMDGPU::TEX_SAMPLE_LB:
- case AMDGPU::TEX_SAMPLE_C_LB:
- case AMDGPU::TEX_SAMPLE_G:
- case AMDGPU::TEX_SAMPLE_C_G:
- case AMDGPU::TXD:
- case AMDGPU::TXD_SHADOW:
- return IDFetch;
default:
- DEBUG(
- dbgs() << "other inst: ";
- SU->dump(DAG);
- );
return IDOther;
}
}
-SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
+SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
if (Q.empty())
return NULL;
- for (std::set<SUnit *, CompareSUnit>::iterator It = Q.begin(), E = Q.end();
+ for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
It != E; ++It) {
SUnit *SU = *It;
InstructionsGroupCandidate.push_back(SU->getInstr());
- if (TII->canBundle(InstructionsGroupCandidate)) {
+ if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
+ && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
+ ) {
InstructionsGroupCandidate.pop_back();
- Q.erase(It);
+ Q.erase((It + 1).base());
return SU;
} else {
InstructionsGroupCandidate.pop_back();
@@ -306,33 +335,37 @@ SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
}
void R600SchedStrategy::LoadAlu() {
- ReadyQueue *QSrc = Pending[IDAlu];
- for (ReadyQueue::iterator I = QSrc->begin(),
- E = QSrc->end(); I != E; ++I) {
- (*I)->NodeQueueId &= ~QSrc->getID();
- AluKind AK = getAluKind(*I);
- AvailableAlus[AK].insert(*I);
- }
- QSrc->clear();
+ std::vector<SUnit *> &QSrc = Pending[IDAlu];
+ for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
+ AluKind AK = getAluKind(QSrc[i]);
+ AvailableAlus[AK].push_back(QSrc[i]);
+ }
+ QSrc.clear();
}
void R600SchedStrategy::PrepareNextSlot() {
DEBUG(dbgs() << "New Slot\n");
assert (OccupedSlotsMask && "Slot wasn't filled");
OccupedSlotsMask = 0;
+// if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
+// OccupedSlotsMask |= 16;
InstructionsGroupCandidate.clear();
LoadAlu();
}
void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
- unsigned DestReg = MI->getOperand(0).getReg();
+ int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ if (DstIndex == -1) {
+ return;
+ }
+ unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is both defined and used in the
  // same instruction and we try to constrain its regclass
for (MachineInstr::mop_iterator It = MI->operands_begin(),
E = MI->operands_end(); It != E; ++It) {
MachineOperand &MO = *It;
if (MO.isReg() && !MO.isDef() &&
- MO.getReg() == MI->getOperand(0).getReg())
+ MO.getReg() == DestReg)
return;
}
// Constrains the regclass of DestReg to assign it to Slot
@@ -352,53 +385,60 @@ void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
}
}
-SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
+SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
- SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
- SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
- if (!UnslotedSU) {
+ SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
+ if (SlotedSU)
return SlotedSU;
- } else if (!SlotedSU) {
+ SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
+ if (UnslotedSU)
AssignSlot(UnslotedSU->getInstr(), Slot);
- return UnslotedSU;
- } else {
- //Determine which one to pick (the lesser one)
- if (CompareSUnit()(SlotedSU, UnslotedSU)) {
- AvailableAlus[AluAny].insert(UnslotedSU);
- return SlotedSU;
- } else {
- AvailableAlus[IndexToID[Slot]].insert(SlotedSU);
- AssignSlot(UnslotedSU->getInstr(), Slot);
- return UnslotedSU;
- }
- }
+ return UnslotedSU;
}
-bool R600SchedStrategy::isAvailablesAluEmpty() const {
- return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() &&
- AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
- AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
- AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty();
+unsigned R600SchedStrategy::AvailablesAluCount() const {
+ return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
+ AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
+ AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
+ AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
+ AvailableAlus[AluPredX].size();
}
SUnit* R600SchedStrategy::pickAlu() {
- while (!isAvailablesAluEmpty()) {
+ while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
if (!OccupedSlotsMask) {
+      // Bottom-up scheduling: PRED_X must come first.
+ if (!AvailableAlus[AluPredX].empty()) {
+ OccupedSlotsMask |= 31;
+ return PopInst(AvailableAlus[AluPredX], false);
+ }
// Flush physical reg copies (RA will discard them)
if (!AvailableAlus[AluDiscarded].empty()) {
- OccupedSlotsMask = 15;
- return PopInst(AvailableAlus[AluDiscarded]);
+ OccupedSlotsMask |= 31;
+ return PopInst(AvailableAlus[AluDiscarded], false);
}
// If there is a T_XYZW alu available, use it
if (!AvailableAlus[AluT_XYZW].empty()) {
- OccupedSlotsMask = 15;
- return PopInst(AvailableAlus[AluT_XYZW]);
+ OccupedSlotsMask |= 15;
+ return PopInst(AvailableAlus[AluT_XYZW], false);
+ }
+ }
+ bool TransSlotOccuped = OccupedSlotsMask & 16;
+ if (!TransSlotOccuped && VLIW5) {
+ if (!AvailableAlus[AluTrans].empty()) {
+ OccupedSlotsMask |= 16;
+ return PopInst(AvailableAlus[AluTrans], false);
+ }
+ SUnit *SU = AttemptFillSlot(3, true);
+ if (SU) {
+ OccupedSlotsMask |= 16;
+ return SU;
}
}
- for (unsigned Chan = 0; Chan < 4; ++Chan) {
+ for (int Chan = 3; Chan > -1; --Chan) {
bool isOccupied = OccupedSlotsMask & (1 << Chan);
if (!isOccupied) {
- SUnit *SU = AttemptFillSlot(Chan);
+ SUnit *SU = AttemptFillSlot(Chan, false);
if (SU) {
OccupedSlotsMask |= (1 << Chan);
InstructionsGroupCandidate.push_back(SU->getInstr());
@@ -413,14 +453,14 @@ SUnit* R600SchedStrategy::pickAlu() {
SUnit* R600SchedStrategy::pickOther(int QID) {
SUnit *SU = 0;
- ReadyQueue *AQ = Available[QID];
+ std::vector<SUnit *> &AQ = Available[QID];
- if (AQ->empty()) {
+ if (AQ.empty()) {
MoveUnits(Pending[QID], AQ);
}
- if (!AQ->empty()) {
- SU = *AQ->begin();
- AQ->remove(AQ->begin());
+ if (!AQ.empty()) {
+ SU = AQ.back();
+ AQ.resize(AQ.size() - 1);
}
return SU;
}
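
A note on the slot bookkeeping in pickAlu above, since the magic numbers are easy to misread: OccupedSlotsMask uses bits 0-3 for the X/Y/Z/W vector slots and bit 4 for the Trans slot, so "|= 15" fills the four vector slots, "|= 31" fills all five, and "|= 16" claims only the Trans slot. A minimal standalone sketch of that encoding (plain C++, not LLVM code; the enum names are illustrative):

#include <cstdio>

// Assumed bit layout, inferred from the scheduler above:
// bits 0..3 -> ALU slots X, Y, Z, W; bit 4 -> Trans slot (VLIW5 only).
enum SlotBits { SlotX = 1, SlotY = 2, SlotZ = 4, SlotW = 8, SlotTrans = 16 };

int main() {
  unsigned Mask = 0;
  Mask |= 15;                       // a T_XYZW instruction fills the four vector slots
  printf("trans free: %d\n", !(Mask & SlotTrans));  // 1: Trans is still available
  Mask |= SlotTrans;                // a trans-only instruction claims the fifth slot
  printf("group full: %d\n", Mask == 31);           // 1: the group is complete
  return 0;
}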
diff --git a/lib/Target/R600/R600MachineScheduler.h b/lib/Target/R600/R600MachineScheduler.h
index 3d0367f..97c8cde 100644
--- a/lib/Target/R600/R600MachineScheduler.h
+++ b/lib/Target/R600/R600MachineScheduler.h
@@ -16,21 +16,14 @@
#define R600MACHINESCHEDULER_H_
#include "R600InstrInfo.h"
+#include "llvm/ADT/PriorityQueue.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/ADT/PriorityQueue.h"
using namespace llvm;
namespace llvm {
-class CompareSUnit {
-public:
- bool operator()(const SUnit *S1, const SUnit *S2) {
- return S1->getDepth() > S2->getDepth();
- }
-};
-
class R600SchedStrategy : public MachineSchedStrategy {
const ScheduleDAGMI *DAG;
@@ -38,12 +31,6 @@ class R600SchedStrategy : public MachineSchedStrategy {
const R600RegisterInfo *TRI;
MachineRegisterInfo *MRI;
- enum InstQueue {
- QAlu = 1,
- QFetch = 2,
- QOther = 4
- };
-
enum InstKind {
IDAlu,
IDFetch,
@@ -58,17 +45,23 @@ class R600SchedStrategy : public MachineSchedStrategy {
AluT_Z,
AluT_W,
AluT_XYZW,
+ AluPredX,
+ AluTrans,
AluDiscarded, // LLVM Instructions that are going to be eliminated
AluLast
};
- ReadyQueue *Available[IDLast], *Pending[IDLast];
- std::multiset<SUnit *, CompareSUnit> AvailableAlus[AluLast];
+ std::vector<SUnit *> Available[IDLast], Pending[IDLast];
+ std::vector<SUnit *> AvailableAlus[AluLast];
+ std::vector<SUnit *> PhysicalRegCopy;
InstKind CurInstKind;
int CurEmitted;
InstKind NextInstKind;
+ unsigned AluInstCount;
+ unsigned FetchInstCount;
+
int InstKindLimit[IDLast];
int OccupedSlotsMask;
@@ -76,19 +69,9 @@ class R600SchedStrategy : public MachineSchedStrategy {
public:
R600SchedStrategy() :
DAG(0), TII(0), TRI(0), MRI(0) {
- Available[IDAlu] = new ReadyQueue(QAlu, "AAlu");
- Available[IDFetch] = new ReadyQueue(QFetch, "AFetch");
- Available[IDOther] = new ReadyQueue(QOther, "AOther");
- Pending[IDAlu] = new ReadyQueue(QAlu<<4, "PAlu");
- Pending[IDFetch] = new ReadyQueue(QFetch<<4, "PFetch");
- Pending[IDOther] = new ReadyQueue(QOther<<4, "POther");
}
virtual ~R600SchedStrategy() {
- for (unsigned I = 0; I < IDLast; ++I) {
- delete Available[I];
- delete Pending[I];
- }
}
virtual void initialize(ScheduleDAGMI *dag);
@@ -99,20 +82,21 @@ public:
private:
std::vector<MachineInstr *> InstructionsGroupCandidate;
+ bool VLIW5;
int getInstKind(SUnit *SU);
bool regBelongsToClass(unsigned Reg, const TargetRegisterClass *RC) const;
AluKind getAluKind(SUnit *SU) const;
void LoadAlu();
- bool isAvailablesAluEmpty() const;
- SUnit *AttemptFillSlot (unsigned Slot);
+ unsigned AvailablesAluCount() const;
+ SUnit *AttemptFillSlot (unsigned Slot, bool AnyAlu);
void PrepareNextSlot();
- SUnit *PopInst(std::multiset<SUnit *, CompareSUnit> &Q);
+ SUnit *PopInst(std::vector<SUnit*> &Q, bool AnyALU);
void AssignSlot(MachineInstr *MI, unsigned Slot);
SUnit* pickAlu();
SUnit* pickOther(int QID);
- void MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst);
+ void MoveUnits(std::vector<SUnit *> &QSrc, std::vector<SUnit *> &QDst);
};
} // namespace llvm
diff --git a/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/lib/Target/R600/R600OptimizeVectorRegisters.cpp
new file mode 100644
index 0000000..cf719c0
--- /dev/null
+++ b/lib/Target/R600/R600OptimizeVectorRegisters.cpp
@@ -0,0 +1,380 @@
+//===--------------------- R600MergeVectorRegisters.cpp -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass merges the inputs of swizzleable instructions into vectors that
+/// share common data and/or have enough undef subregs, using swizzle
+/// abilities.
+///
+/// For instance, consider the following pseudo code:
+/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// ...
+/// vreg7<def> = REG_SEQ vreg1, sub0, vreg3, sub1, undef, sub2, vreg4, sub3
+/// (swizzleable Inst) vreg7, SwizzleMask: sub0, sub1, sub2, sub3
+///
+/// is turned into :
+/// vreg5<def> = REG_SEQ vreg1, sub0, vreg2, sub1, vreg3, sub2, undef, sub3
+/// ...
+/// vreg7<def> = INSERT_SUBREG vreg4, sub3
+/// (swizzleable Inst) vreg7, SwizzleMask: sub0, sub2, sub1, sub3
+///
+/// This allows regalloc to reduce register pressure for vector registers
+/// and to reduce the MOV count.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "vec-merger"
+#include "llvm/Support/Debug.h"
+#include "AMDGPU.h"
+#include "R600InstrInfo.h"
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+namespace {
+
+static bool
+isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
+ for (MachineRegisterInfo::def_iterator It = MRI.def_begin(Reg),
+ E = MRI.def_end(); It != E; ++It) {
+ return (*It).isImplicitDef();
+ }
+ if (MRI.isReserved(Reg)) {
+ return false;
+ }
+ llvm_unreachable("Reg without a def");
+ return false;
+}
+
+class RegSeqInfo {
+public:
+ MachineInstr *Instr;
+ DenseMap<unsigned, unsigned> RegToChan;
+ std::vector<unsigned> UndefReg;
+ RegSeqInfo(MachineRegisterInfo &MRI, MachineInstr *MI) : Instr(MI) {
+ assert (MI->getOpcode() == AMDGPU::REG_SEQUENCE);
+ for (unsigned i = 1, e = Instr->getNumOperands(); i < e; i+=2) {
+ MachineOperand &MO = Instr->getOperand(i);
+ unsigned Chan = Instr->getOperand(i + 1).getImm();
+ if (isImplicitlyDef(MRI, MO.getReg()))
+ UndefReg.push_back(Chan);
+ else
+ RegToChan[MO.getReg()] = Chan;
+ }
+ }
+ RegSeqInfo() {}
+
+ bool operator==(const RegSeqInfo &RSI) const {
+ return RSI.Instr == Instr;
+ }
+};
+
+class R600VectorRegMerger : public MachineFunctionPass {
+private:
+ MachineRegisterInfo *MRI;
+ const R600InstrInfo *TII;
+ bool canSwizzle(const MachineInstr &) const;
+ bool areAllUsesSwizzeable(unsigned Reg) const;
+ void SwizzleInput(MachineInstr &,
+ const std::vector<std::pair<unsigned, unsigned> > &) const;
+ bool tryMergeVector(const RegSeqInfo *, RegSeqInfo *,
+ std::vector<std::pair<unsigned, unsigned> > &Remap) const;
+ bool tryMergeUsingCommonSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan);
+ bool tryMergeUsingFreeSlot(RegSeqInfo &RSI, RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan);
+ MachineInstr *RebuildVector(RegSeqInfo *MI,
+ const RegSeqInfo *BaseVec,
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const;
+ void RemoveMI(MachineInstr *);
+ void trackRSI(const RegSeqInfo &RSI);
+
+ typedef DenseMap<unsigned, std::vector<MachineInstr *> > InstructionSetMap;
+ DenseMap<MachineInstr *, RegSeqInfo> PreviousRegSeq;
+ InstructionSetMap PreviousRegSeqByReg;
+ InstructionSetMap PreviousRegSeqByUndefCount;
+public:
+ static char ID;
+ R600VectorRegMerger(TargetMachine &tm) : MachineFunctionPass(ID),
+ TII(0) { }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ const char *getPassName() const {
+ return "R600 Vector Registers Merge Pass";
+ }
+
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+char R600VectorRegMerger::ID = 0;
+
+bool R600VectorRegMerger::canSwizzle(const MachineInstr &MI)
+ const {
+ if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST)
+ return true;
+ switch (MI.getOpcode()) {
+ case AMDGPU::R600_ExportSwz:
+ case AMDGPU::EG_ExportSwz:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool R600VectorRegMerger::tryMergeVector(const RegSeqInfo *Untouched,
+ RegSeqInfo *ToMerge, std::vector< std::pair<unsigned, unsigned> > &Remap)
+ const {
+  unsigned CurrentUndefIdx = 0;
+ for (DenseMap<unsigned, unsigned>::iterator It = ToMerge->RegToChan.begin(),
+ E = ToMerge->RegToChan.end(); It != E; ++It) {
+ DenseMap<unsigned, unsigned>::const_iterator PosInUntouched =
+ Untouched->RegToChan.find((*It).first);
+ if (PosInUntouched != Untouched->RegToChan.end()) {
+ Remap.push_back(std::pair<unsigned, unsigned>
+ ((*It).second, (*PosInUntouched).second));
+ continue;
+ }
+    if (CurrentUndefIdx >= Untouched->UndefReg.size())
+ return false;
+ Remap.push_back(std::pair<unsigned, unsigned>
+        ((*It).second, Untouched->UndefReg[CurrentUndefIdx++]));
+ }
+
+ return true;
+}
+
+static
+unsigned getReassignedChan(
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan,
+ unsigned Chan) {
+ for (unsigned j = 0, je = RemapChan.size(); j < je; j++) {
+ if (RemapChan[j].first == Chan)
+ return RemapChan[j].second;
+ }
+ llvm_unreachable("Chan wasn't reassigned");
+}
+
+MachineInstr *R600VectorRegMerger::RebuildVector(
+ RegSeqInfo *RSI, const RegSeqInfo *BaseRSI,
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const {
+ unsigned Reg = RSI->Instr->getOperand(0).getReg();
+ MachineBasicBlock::iterator Pos = RSI->Instr;
+ MachineBasicBlock &MBB = *Pos->getParent();
+ DebugLoc DL = Pos->getDebugLoc();
+
+ unsigned SrcVec = BaseRSI->Instr->getOperand(0).getReg();
+ DenseMap<unsigned, unsigned> UpdatedRegToChan = BaseRSI->RegToChan;
+ std::vector<unsigned> UpdatedUndef = BaseRSI->UndefReg;
+ for (DenseMap<unsigned, unsigned>::iterator It = RSI->RegToChan.begin(),
+ E = RSI->RegToChan.end(); It != E; ++It) {
+ unsigned DstReg = MRI->createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
+ unsigned SubReg = (*It).first;
+ unsigned Swizzle = (*It).second;
+ unsigned Chan = getReassignedChan(RemapChan, Swizzle);
+
+ MachineInstr *Tmp = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::INSERT_SUBREG),
+ DstReg)
+ .addReg(SrcVec)
+ .addReg(SubReg)
+ .addImm(Chan);
+ UpdatedRegToChan[SubReg] = Chan;
+ std::vector<unsigned>::iterator ChanPos =
+ std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan);
+ if (ChanPos != UpdatedUndef.end())
+ UpdatedUndef.erase(ChanPos);
+ assert(std::find(UpdatedUndef.begin(), UpdatedUndef.end(), Chan) ==
+ UpdatedUndef.end() &&
+ "UpdatedUndef shouldn't contain Chan more than once!");
+ DEBUG(dbgs() << " ->"; Tmp->dump(););
+ (void)Tmp;
+ SrcVec = DstReg;
+ }
+ Pos = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg)
+ .addReg(SrcVec);
+ DEBUG(dbgs() << " ->"; Pos->dump(););
+
+ DEBUG(dbgs() << " Updating Swizzle:\n");
+ for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg),
+ E = MRI->use_end(); It != E; ++It) {
+ DEBUG(dbgs() << " ";(*It).dump(); dbgs() << " ->");
+ SwizzleInput(*It, RemapChan);
+ DEBUG((*It).dump());
+ }
+ RSI->Instr->eraseFromParent();
+
+ // Update RSI
+ RSI->Instr = Pos;
+ RSI->RegToChan = UpdatedRegToChan;
+ RSI->UndefReg = UpdatedUndef;
+
+ return Pos;
+}
+
+void R600VectorRegMerger::RemoveMI(MachineInstr *MI) {
+ for (InstructionSetMap::iterator It = PreviousRegSeqByReg.begin(),
+ E = PreviousRegSeqByReg.end(); It != E; ++It) {
+ std::vector<MachineInstr *> &MIs = (*It).second;
+    MIs.erase(std::remove(MIs.begin(), MIs.end(), MI), MIs.end());
+ }
+ for (InstructionSetMap::iterator It = PreviousRegSeqByUndefCount.begin(),
+ E = PreviousRegSeqByUndefCount.end(); It != E; ++It) {
+ std::vector<MachineInstr *> &MIs = (*It).second;
+    MIs.erase(std::remove(MIs.begin(), MIs.end(), MI), MIs.end());
+ }
+}
+
+void R600VectorRegMerger::SwizzleInput(MachineInstr &MI,
+ const std::vector<std::pair<unsigned, unsigned> > &RemapChan) const {
+ unsigned Offset;
+ if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST)
+ Offset = 2;
+ else
+ Offset = 3;
+ for (unsigned i = 0; i < 4; i++) {
+ unsigned Swizzle = MI.getOperand(i + Offset).getImm() + 1;
+ for (unsigned j = 0, e = RemapChan.size(); j < e; j++) {
+ if (RemapChan[j].first == Swizzle) {
+ MI.getOperand(i + Offset).setImm(RemapChan[j].second - 1);
+ break;
+ }
+ }
+ }
+}
+
+bool R600VectorRegMerger::areAllUsesSwizzeable(unsigned Reg) const {
+ for (MachineRegisterInfo::use_iterator It = MRI->use_begin(Reg),
+ E = MRI->use_end(); It != E; ++It) {
+ if (!canSwizzle(*It))
+ return false;
+ }
+ return true;
+}
+
+bool R600VectorRegMerger::tryMergeUsingCommonSlot(RegSeqInfo &RSI,
+ RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan) {
+ for (MachineInstr::mop_iterator MOp = RSI.Instr->operands_begin(),
+ MOE = RSI.Instr->operands_end(); MOp != MOE; ++MOp) {
+ if (!MOp->isReg())
+ continue;
+ if (PreviousRegSeqByReg[MOp->getReg()].empty())
+ continue;
+ std::vector<MachineInstr *> MIs = PreviousRegSeqByReg[MOp->getReg()];
+ for (unsigned i = 0, e = MIs.size(); i < e; i++) {
+ CompatibleRSI = PreviousRegSeq[MIs[i]];
+ if (RSI == CompatibleRSI)
+ continue;
+ if (tryMergeVector(&CompatibleRSI, &RSI, RemapChan))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool R600VectorRegMerger::tryMergeUsingFreeSlot(RegSeqInfo &RSI,
+ RegSeqInfo &CompatibleRSI,
+ std::vector<std::pair<unsigned, unsigned> > &RemapChan) {
+ unsigned NeededUndefs = 4 - RSI.UndefReg.size();
+ if (PreviousRegSeqByUndefCount[NeededUndefs].empty())
+ return false;
+ std::vector<MachineInstr *> &MIs =
+ PreviousRegSeqByUndefCount[NeededUndefs];
+ CompatibleRSI = PreviousRegSeq[MIs.back()];
+ tryMergeVector(&CompatibleRSI, &RSI, RemapChan);
+ return true;
+}
+
+void R600VectorRegMerger::trackRSI(const RegSeqInfo &RSI) {
+ for (DenseMap<unsigned, unsigned>::const_iterator
+ It = RSI.RegToChan.begin(), E = RSI.RegToChan.end(); It != E; ++It) {
+ PreviousRegSeqByReg[(*It).first].push_back(RSI.Instr);
+ }
+ PreviousRegSeqByUndefCount[RSI.UndefReg.size()].push_back(RSI.Instr);
+ PreviousRegSeq[RSI.Instr] = RSI;
+}
+
+bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
+ TII = static_cast<const R600InstrInfo *>(Fn.getTarget().getInstrInfo());
+ MRI = &(Fn.getRegInfo());
+ for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
+ MBB != MBBe; ++MBB) {
+ MachineBasicBlock *MB = MBB;
+ PreviousRegSeq.clear();
+ PreviousRegSeqByReg.clear();
+ PreviousRegSeqByUndefCount.clear();
+
+ for (MachineBasicBlock::iterator MII = MB->begin(), MIIE = MB->end();
+ MII != MIIE; ++MII) {
+ MachineInstr *MI = MII;
+ if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) {
+ if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
+ unsigned Reg = MI->getOperand(1).getReg();
+ for (MachineRegisterInfo::def_iterator It = MRI->def_begin(Reg),
+ E = MRI->def_end(); It != E; ++It) {
+ RemoveMI(&(*It));
+ }
+ }
+ continue;
+ }
+
+
+ RegSeqInfo RSI(*MRI, MI);
+
+      // Are all uses of MI swizzleable?
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (!areAllUsesSwizzeable(Reg))
+ continue;
+
+ DEBUG (dbgs() << "Trying to optimize ";
+ MI->dump();
+ );
+
+ RegSeqInfo CandidateRSI;
+ std::vector<std::pair<unsigned, unsigned> > RemapChan;
+ DEBUG(dbgs() << "Using common slots...\n";);
+ if (tryMergeUsingCommonSlot(RSI, CandidateRSI, RemapChan)) {
+ // Remove CandidateRSI mapping
+ RemoveMI(CandidateRSI.Instr);
+ MII = RebuildVector(&RSI, &CandidateRSI, RemapChan);
+ trackRSI(RSI);
+ continue;
+ }
+ DEBUG(dbgs() << "Using free slots...\n";);
+ RemapChan.clear();
+ if (tryMergeUsingFreeSlot(RSI, CandidateRSI, RemapChan)) {
+ RemoveMI(CandidateRSI.Instr);
+ MII = RebuildVector(&RSI, &CandidateRSI, RemapChan);
+ trackRSI(RSI);
+ continue;
+ }
+      // Failed to merge.
+ trackRSI(RSI);
+ }
+ }
+ return false;
+}
+
+}
+
+llvm::FunctionPass *llvm::createR600VectorRegMerger(TargetMachine &tm) {
+ return new R600VectorRegMerger(tm);
+}
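
To make the merge above concrete: tryMergeVector pairs each channel of the vector being merged with either the matching channel of the earlier REG_SEQUENCE or one of its undef channels, and the resulting (old, new) pairs drive both RebuildVector and SwizzleInput. A minimal standalone model of that remapping (plain C++, not the pass itself; the channel values are made up for illustration):

#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// (old channel, new channel) pairs, as built by tryMergeVector.
typedef std::vector<std::pair<unsigned, unsigned> > RemapVec;

// Mirrors getReassignedChan: find where an old channel landed.
static unsigned reassignedChan(const RemapVec &Remap, unsigned Chan) {
  for (unsigned i = 0, e = Remap.size(); i < e; ++i)
    if (Remap[i].first == Chan)
      return Remap[i].second;
  assert(0 && "Chan wasn't reassigned");
  return 0;
}

int main() {
  // Suppose the merged vector used channels {0, 1, 3}: channel 0 matched the
  // base vector's channel 0, channel 1 matched its channel 2, and channel 3
  // fell into an undef slot at channel 1 (illustrative values only).
  RemapVec Remap;
  Remap.push_back(std::make_pair(0u, 0u));
  Remap.push_back(std::make_pair(1u, 2u));
  Remap.push_back(std::make_pair(3u, 1u));
  unsigned Swizzle[3] = { 0, 1, 3 };   // swizzle mask of the old vector
  for (unsigned i = 0; i < 3; ++i)     // rewrite it, as SwizzleInput does
    Swizzle[i] = reassignedChan(Remap, Swizzle[i]);
  printf("%u %u %u\n", Swizzle[0], Swizzle[1], Swizzle[2]);  // 0 2 1
  return 0;
}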
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index cd7b7d0..cd9b6ea 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -14,22 +14,21 @@
//
//===----------------------------------------------------------------------===//
-#ifndef R600PACKETIZER_CPP
-#define R600PACKETIZER_CPP
-
#define DEBUG_TYPE "packets"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
+#include "AMDGPU.h"
+#include "R600InstrInfo.h"
#include "llvm/CodeGen/DFAPacketizer.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAG.h"
-#include "AMDGPU.h"
-#include "R600InstrInfo.h"
+#include "llvm/Support/raw_ostream.h"
-namespace llvm {
+using namespace llvm;
+
+namespace {
class R600Packetizer : public MachineFunctionPass {
@@ -59,15 +58,8 @@ class R600PacketizerList : public VLIWPacketizerList {
private:
const R600InstrInfo *TII;
const R600RegisterInfo &TRI;
-
- enum BankSwizzle {
- ALU_VEC_012 = 0,
- ALU_VEC_021,
- ALU_VEC_120,
- ALU_VEC_102,
- ALU_VEC_201,
- ALU_VEC_210
- };
+ bool VLIW5;
+ bool ConsideredInstUsesAlreadyWrittenVectorElement;
unsigned getSlot(const MachineInstr *MI) const {
return TRI.getHWRegChan(MI->getOperand(0).getReg());
@@ -84,21 +76,35 @@ private:
MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
if (I->isBundle())
BI++;
+ int LastDstChan = -1;
do {
+ bool isTrans = false;
+ int BISlot = getSlot(BI);
+ if (LastDstChan >= BISlot)
+ isTrans = true;
+ LastDstChan = BISlot;
if (TII->isPredicated(BI))
continue;
- if (TII->isTransOnly(BI))
+ int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
+ if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
continue;
- int OperandIdx = TII->getOperandIdx(BI->getOpcode(), R600Operands::WRITE);
- if (OperandIdx < 0)
+ int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst);
+ if (DstIdx == -1) {
continue;
- if (BI->getOperand(OperandIdx).getImm() == 0)
+ }
+ unsigned Dst = BI->getOperand(DstIdx).getReg();
+ if (isTrans || TII->isTransOnly(BI)) {
+ Result[Dst] = AMDGPU::PS;
continue;
- unsigned Dst = BI->getOperand(0).getReg();
- if (BI->getOpcode() == AMDGPU::DOT4_r600_real) {
+ }
+ if (BI->getOpcode() == AMDGPU::DOT4_r600 ||
+ BI->getOpcode() == AMDGPU::DOT4_eg) {
Result[Dst] = AMDGPU::PV_X;
continue;
}
+ if (Dst == AMDGPU::OQAP) {
+ continue;
+ }
unsigned PVReg = 0;
switch (TRI.getHWRegChan(Dst)) {
case 0:
@@ -123,10 +129,10 @@ private:
void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs)
const {
- R600Operands::Ops Ops[] = {
- R600Operands::SRC0,
- R600Operands::SRC1,
- R600Operands::SRC2
+ unsigned Ops[] = {
+ AMDGPU::OpName::src0,
+ AMDGPU::OpName::src1,
+ AMDGPU::OpName::src2
};
for (unsigned i = 0; i < 3; i++) {
int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
@@ -144,10 +150,14 @@ public:
MachineDominatorTree &MDT)
: VLIWPacketizerList(MF, MLI, MDT, true),
TII (static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo())),
- TRI(TII->getRegisterInfo()) { }
+ TRI(TII->getRegisterInfo()) {
+ VLIW5 = !MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
+ }
// initPacketizerState - initialize some internal flags.
- void initPacketizerState() { }
+ void initPacketizerState() {
+ ConsideredInstUsesAlreadyWrittenVectorElement = false;
+ }
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB) {
@@ -161,9 +171,11 @@ public:
return true;
if (!TII->isALUInstr(MI->getOpcode()))
return true;
- if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TRANS_ONLY)
+ if (MI->getOpcode() == AMDGPU::GROUP_BARRIER)
return true;
- if (TII->isTransOnly(MI))
+ // XXX: This can be removed once the packetizer properly handles all the
+ // LDS instruction group restrictions.
+ if (TII->isLDSInstr(MI->getOpcode()))
return true;
return false;
}
@@ -172,11 +184,11 @@ public:
// together.
bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr();
- if (getSlot(MII) <= getSlot(MIJ))
- return false;
+ if (getSlot(MII) == getSlot(MIJ))
+ ConsideredInstUsesAlreadyWrittenVectorElement = true;
// Does MII and MIJ share the same pred_sel ?
- int OpI = TII->getOperandIdx(MII->getOpcode(), R600Operands::PRED_SEL),
- OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600Operands::PRED_SEL);
+ int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel),
+ OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel);
unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
if (PredI != PredJ)
@@ -194,6 +206,14 @@ public:
return false;
}
}
+
+ bool ARDef = TII->definesAddressRegister(MII) ||
+ TII->definesAddressRegister(MIJ);
+ bool ARUse = TII->usesAddressRegister(MII) ||
+ TII->usesAddressRegister(MIJ);
+ if (ARDef && ARUse)
+ return false;
+
return true;
}
@@ -202,15 +222,34 @@ public:
bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {return false;}
void setIsLastBit(MachineInstr *MI, unsigned Bit) const {
- unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), R600Operands::LAST);
+ unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last);
MI->getOperand(LastOp).setImm(Bit);
}
- MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ bool isBundlableWithCurrentPMI(MachineInstr *MI,
+ const DenseMap<unsigned, unsigned> &PV,
+ std::vector<R600InstrInfo::BankSwizzle> &BS,
+ bool &isTransSlot) {
+ isTransSlot = TII->isTransOnly(MI);
+ assert (!isTransSlot || VLIW5);
+
+    // Is the dst reg sequence legal?
+ if (!isTransSlot && !CurrentPacketMIs.empty()) {
+ if (getSlot(MI) <= getSlot(CurrentPacketMIs.back())) {
+ if (ConsideredInstUsesAlreadyWrittenVectorElement &&
+ !TII->isVectorOnly(MI) && VLIW5) {
+ isTransSlot = true;
+ DEBUG(dbgs() << "Considering as Trans Inst :"; MI->dump(););
+ }
+ else
+ return false;
+ }
+ }
+
+    // Are the constant read limitations met?
CurrentPacketMIs.push_back(MI);
- bool FitsConstLimits = TII->canBundle(CurrentPacketMIs);
- DEBUG(
- if (!FitsConstLimits) {
+ if (!TII->fitsConstReadLimitations(CurrentPacketMIs)) {
+ DEBUG(
dbgs() << "Couldn't pack :\n";
MI->dump();
dbgs() << "with the following packets :\n";
@@ -219,12 +258,15 @@ public:
dbgs() << "\n";
}
dbgs() << "because of Consts read limitations\n";
- });
- const DenseMap<unsigned, unsigned> &PV =
- getPreviousVector(CurrentPacketMIs.front());
- bool FitsReadPortLimits = fitsReadPortLimitation(CurrentPacketMIs, PV);
- DEBUG(
- if (!FitsReadPortLimits) {
+ );
+ CurrentPacketMIs.pop_back();
+ return false;
+ }
+
+    // Is there a BankSwizzle set that meets the read port limitations?
+ if (!TII->fitsReadPortLimitations(CurrentPacketMIs,
+ PV, BS, isTransSlot)) {
+ DEBUG(
dbgs() << "Couldn't pack :\n";
MI->dump();
dbgs() << "with the following packets :\n";
@@ -233,146 +275,50 @@ public:
dbgs() << "\n";
}
dbgs() << "because of Read port limitations\n";
- });
- bool isBundlable = FitsConstLimits && FitsReadPortLimits;
- CurrentPacketMIs.pop_back();
- if (!isBundlable) {
- endPacket(MI->getParent(), MI);
- substitutePV(MI, getPreviousVector(MI));
- return VLIWPacketizerList::addToPacket(MI);
- }
- if (!CurrentPacketMIs.empty())
- setIsLastBit(CurrentPacketMIs.back(), 0);
- substitutePV(MI, PV);
- return VLIWPacketizerList::addToPacket(MI);
- }
-private:
- std::vector<std::pair<int, unsigned> >
- ExtractSrcs(const MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV)
- const {
- R600Operands::Ops Ops[] = {
- R600Operands::SRC0,
- R600Operands::SRC1,
- R600Operands::SRC2
- };
- std::vector<std::pair<int, unsigned> > Result;
- for (unsigned i = 0; i < 3; i++) {
- int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
- if (OperandIdx < 0){
- Result.push_back(std::pair<int, unsigned>(-1,0));
- continue;
- }
- unsigned Src = MI->getOperand(OperandIdx).getReg();
- if (PV.find(Src) != PV.end()) {
- Result.push_back(std::pair<int, unsigned>(-1,0));
- continue;
- }
- unsigned Reg = TRI.getEncodingValue(Src) & 0xff;
- if (Reg > 127) {
- Result.push_back(std::pair<int, unsigned>(-1,0));
- continue;
- }
- unsigned Chan = TRI.getHWRegChan(Src);
- Result.push_back(std::pair<int, unsigned>(Reg, Chan));
+ );
+ CurrentPacketMIs.pop_back();
+ return false;
}
- return Result;
- }
- std::vector<std::pair<int, unsigned> >
- Swizzle(std::vector<std::pair<int, unsigned> > Src,
- BankSwizzle Swz) const {
- switch (Swz) {
- case ALU_VEC_012:
- break;
- case ALU_VEC_021:
- std::swap(Src[1], Src[2]);
- break;
- case ALU_VEC_102:
- std::swap(Src[0], Src[1]);
- break;
- case ALU_VEC_120:
- std::swap(Src[0], Src[1]);
- std::swap(Src[0], Src[2]);
- break;
- case ALU_VEC_201:
- std::swap(Src[0], Src[2]);
- std::swap(Src[0], Src[1]);
- break;
- case ALU_VEC_210:
- std::swap(Src[0], Src[2]);
- break;
- }
- return Src;
- }
+    // We cannot read LDS source registers from the Trans slot.
+ if (isTransSlot && TII->readsLDSSrcReg(MI))
+ return false;
- bool isLegal(const std::vector<MachineInstr *> &IG,
- const std::vector<BankSwizzle> &Swz,
- const DenseMap<unsigned, unsigned> &PV) const {
- assert (Swz.size() == IG.size());
- int Vector[4][3];
- memset(Vector, -1, sizeof(Vector));
- for (unsigned i = 0, e = IG.size(); i < e; i++) {
- const std::vector<std::pair<int, unsigned> > &Srcs =
- Swizzle(ExtractSrcs(IG[i], PV), Swz[i]);
- for (unsigned j = 0; j < 3; j++) {
- const std::pair<int, unsigned> &Src = Srcs[j];
- if (Src.first < 0)
- continue;
- if (Vector[Src.second][j] < 0)
- Vector[Src.second][j] = Src.first;
- if (Vector[Src.second][j] != Src.first)
- return false;
- }
- }
+ CurrentPacketMIs.pop_back();
return true;
}
- bool recursiveFitsFPLimitation(
- std::vector<MachineInstr *> IG,
- const DenseMap<unsigned, unsigned> &PV,
- std::vector<BankSwizzle> &SwzCandidate,
- std::vector<MachineInstr *> CurrentlyChecked)
- const {
- if (!isLegal(CurrentlyChecked, SwzCandidate, PV))
- return false;
- if (IG.size() == CurrentlyChecked.size()) {
- return true;
- }
- BankSwizzle AvailableSwizzle[] = {
- ALU_VEC_012,
- ALU_VEC_021,
- ALU_VEC_120,
- ALU_VEC_102,
- ALU_VEC_201,
- ALU_VEC_210
- };
- CurrentlyChecked.push_back(IG[CurrentlyChecked.size()]);
- for (unsigned i = 0; i < 6; i++) {
- SwzCandidate.push_back(AvailableSwizzle[i]);
- if (recursiveFitsFPLimitation(IG, PV, SwzCandidate, CurrentlyChecked))
- return true;
- SwzCandidate.pop_back();
- }
- return false;
- }
-
- bool fitsReadPortLimitation(
- std::vector<MachineInstr *> IG,
- const DenseMap<unsigned, unsigned> &PV)
- const {
- //Todo : support shared src0 - src1 operand
- std::vector<BankSwizzle> SwzCandidate;
- bool Result = recursiveFitsFPLimitation(IG, PV, SwzCandidate,
- std::vector<MachineInstr *>());
- if (!Result)
- return false;
- for (unsigned i = 0, e = IG.size(); i < e; i++) {
- MachineInstr *MI = IG[i];
+ MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ MachineBasicBlock::iterator FirstInBundle =
+ CurrentPacketMIs.empty() ? MI : CurrentPacketMIs.front();
+ const DenseMap<unsigned, unsigned> &PV =
+ getPreviousVector(FirstInBundle);
+ std::vector<R600InstrInfo::BankSwizzle> BS;
+ bool isTransSlot;
+
+ if (isBundlableWithCurrentPMI(MI, PV, BS, isTransSlot)) {
+ for (unsigned i = 0, e = CurrentPacketMIs.size(); i < e; i++) {
+ MachineInstr *MI = CurrentPacketMIs[i];
+ unsigned Op = TII->getOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::bank_swizzle);
+ MI->getOperand(Op).setImm(BS[i]);
+ }
unsigned Op = TII->getOperandIdx(MI->getOpcode(),
- R600Operands::BANK_SWIZZLE);
- MI->getOperand(Op).setImm(SwzCandidate[i]);
+ AMDGPU::OpName::bank_swizzle);
+ MI->getOperand(Op).setImm(BS.back());
+ if (!CurrentPacketMIs.empty())
+ setIsLastBit(CurrentPacketMIs.back(), 0);
+ substitutePV(MI, PV);
+ MachineBasicBlock::iterator It = VLIWPacketizerList::addToPacket(MI);
+ if (isTransSlot) {
+ endPacket(llvm::next(It)->getParent(), llvm::next(It));
+ }
+ return It;
}
- return true;
+ endPacket(MI->getParent(), MI);
+ if (TII->isTransOnly(MI))
+ return MI;
+ return VLIWPacketizerList::addToPacket(MI);
}
};
@@ -402,7 +348,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::iterator End = MBB->end();
MachineBasicBlock::iterator MI = MBB->begin();
while (MI != End) {
- if (MI->isKill()) {
+ if (MI->isKill() || MI->getOpcode() == AMDGPU::IMPLICIT_DEF ||
+ (MI->getOpcode() == AMDGPU::CF_ALU && !MI->getOperand(8).getImm())) {
MachineBasicBlock::iterator DeleteMI = MI;
++MI;
MBB->erase(DeleteMI);
@@ -450,10 +397,8 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
}
-}
+} // end anonymous namespace
llvm::FunctionPass *llvm::createR600Packetizer(TargetMachine &tm) {
return new R600Packetizer(tm);
}
-
-#endif // R600PACKETIZER_CPP
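
The PV/PS substitution above relies on a hardware forwarding convention: within the next instruction group, the previous group's per-channel results are readable as PV.X/Y/Z/W, and the Trans slot result as PS. A standalone sketch of the map that getPreviousVector builds and substitutePV consumes (plain C++; the register ids are illustrative, not the real AMDGPU encodings):

#include <cstdio>
#include <map>

// Illustrative register ids only.
enum Reg { T0_X = 1, T0_Y, T1_W, PV_X = 100, PV_Y, PV_Z, PV_W, PS };

int main() {
  // getPreviousVector: map each dst written by the previous group to the
  // forwarding register that carries its value (PS for the Trans slot).
  std::map<unsigned, unsigned> PrevVec;
  PrevVec[T0_X] = PV_X;   // written in vector slot X
  PrevVec[T1_W] = PV_W;   // written in vector slot W
  PrevVec[T0_Y] = PS;     // written in the Trans slot

  // substitutePV: rewrite a source read in the current group to the
  // forwarding register if the previous group produced it.
  unsigned Srcs[2] = { T0_Y, T0_X };
  for (unsigned i = 0; i < 2; ++i) {
    std::map<unsigned, unsigned>::const_iterator It = PrevVec.find(Srcs[i]);
    if (It != PrevVec.end())
      Srcs[i] = It->second;
  }
  printf("%u %u\n", Srcs[0], Srcs[1]);  // prints the ids of PS and PV_X
  return 0;
}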
diff --git a/lib/Target/R600/R600RegisterInfo.cpp b/lib/Target/R600/R600RegisterInfo.cpp
index bbd7995..f3bb88b 100644
--- a/lib/Target/R600/R600RegisterInfo.cpp
+++ b/lib/Target/R600/R600RegisterInfo.cpp
@@ -20,16 +20,16 @@
using namespace llvm;
-R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm,
- const TargetInstrInfo &tii)
-: AMDGPURegisterInfo(tm, tii),
- TM(tm),
- TII(tii)
- { }
+R600RegisterInfo::R600RegisterInfo(AMDGPUTargetMachine &tm)
+: AMDGPURegisterInfo(tm),
+ TM(tm)
+ { RCW.RegWeight = 0; RCW.WeightLimit = 0;}
BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
+
Reserved.set(AMDGPU::ZERO);
Reserved.set(AMDGPU::HALF);
Reserved.set(AMDGPU::ONE);
@@ -43,25 +43,15 @@ BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(AMDGPU::PRED_SEL_OFF);
Reserved.set(AMDGPU::PRED_SEL_ZERO);
Reserved.set(AMDGPU::PRED_SEL_ONE);
+ Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
for (TargetRegisterClass::iterator I = AMDGPU::R600_AddrRegClass.begin(),
E = AMDGPU::R600_AddrRegClass.end(); I != E; ++I) {
Reserved.set(*I);
}
- for (TargetRegisterClass::iterator I = AMDGPU::TRegMemRegClass.begin(),
- E = AMDGPU::TRegMemRegClass.end();
- I != E; ++I) {
- Reserved.set(*I);
- }
+ TII->reserveIndirectRegisters(Reserved, MF);
- const R600InstrInfo *RII = static_cast<const R600InstrInfo*>(&TII);
- std::vector<unsigned> IndirectRegs = RII->getIndirectReservedRegs(MF);
- for (std::vector<unsigned>::iterator I = IndirectRegs.begin(),
- E = IndirectRegs.end();
- I != E; ++I) {
- Reserved.set(*I);
- }
return Reserved;
}
@@ -79,6 +69,10 @@ unsigned R600RegisterInfo::getHWRegChan(unsigned reg) const {
return this->getEncodingValue(reg) >> HW_CHAN_SHIFT;
}
+unsigned R600RegisterInfo::getHWRegIndex(unsigned Reg) const {
+ return GET_REG_INDEX(getEncodingValue(Reg));
+}
+
const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
MVT VT) const {
switch(VT.SimpleTy) {
@@ -87,13 +81,20 @@ const TargetRegisterClass * R600RegisterInfo::getCFGStructurizerRegClass(
}
}
-unsigned R600RegisterInfo::getSubRegFromChannel(unsigned Channel) const {
- switch (Channel) {
- default: assert(!"Invalid channel index"); return 0;
- case 0: return AMDGPU::sub0;
- case 1: return AMDGPU::sub1;
- case 2: return AMDGPU::sub2;
- case 3: return AMDGPU::sub3;
- }
+const RegClassWeight &R600RegisterInfo::getRegClassWeight(
+ const TargetRegisterClass *RC) const {
+ return RCW;
}
+bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
+ assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+
+ switch (Reg) {
+ case AMDGPU::OQAP:
+ case AMDGPU::OQBP:
+ case AMDGPU::AR_X:
+ return false;
+ default:
+ return true;
+ }
+}
diff --git a/lib/Target/R600/R600RegisterInfo.h b/lib/Target/R600/R600RegisterInfo.h
index f9ca918..c74c49e 100644
--- a/lib/Target/R600/R600RegisterInfo.h
+++ b/lib/Target/R600/R600RegisterInfo.h
@@ -21,13 +21,12 @@
namespace llvm {
class R600TargetMachine;
-class TargetInstrInfo;
struct R600RegisterInfo : public AMDGPURegisterInfo {
AMDGPUTargetMachine &TM;
- const TargetInstrInfo &TII;
+ RegClassWeight RCW;
- R600RegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+ R600RegisterInfo(AMDGPUTargetMachine &tm);
virtual BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -40,14 +39,16 @@ struct R600RegisterInfo : public AMDGPURegisterInfo {
/// \brief get the HW encoding for a register's channel.
unsigned getHWRegChan(unsigned reg) const;
+ virtual unsigned getHWRegIndex(unsigned Reg) const;
+
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
- /// \returns the sub reg enum value for the given \p Channel
- /// (e.g. getSubRegFromChannel(0) -> AMDGPU::sel_x)
- unsigned getSubRegFromChannel(unsigned Channel) const;
+ virtual const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const;
+  // \returns true if \p Reg can be defined in one ALU clause and used in another.
+ virtual bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
};
} // End namespace llvm
diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td
index bfc546b..68bcd20 100644
--- a/lib/Target/R600/R600RegisterInfo.td
+++ b/lib/Target/R600/R600RegisterInfo.td
@@ -23,6 +23,14 @@ class R600Reg_128<string n, list<Register> subregs, bits<16> encoding> :
let HWEncoding = encoding;
}
+class R600Reg_64<string n, list<Register> subregs, bits<16> encoding> :
+ RegisterWithSubRegs<n, subregs> {
+ let Namespace = "AMDGPU";
+ let SubRegIndices = [sub0, sub1];
+ let HWEncoding = encoding;
+}
+
+
foreach Index = 0-127 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
// 32-bit Temporary Registers
@@ -31,26 +39,29 @@ foreach Index = 0-127 in {
// Indirect addressing offset registers
def Addr#Index#_#Chan : R600RegWithChan <"T("#Index#" + AR.x)."#Chan,
Index, Chan>;
- def TRegMem#Index#_#Chan : R600RegWithChan <"T"#Index#"."#Chan, Index,
- Chan>;
}
// 128-bit Temporary Registers
- def T#Index#_XYZW : R600Reg_128 <"T"#Index#".XYZW",
+ def T#Index#_XYZW : R600Reg_128 <"T"#Index#"",
[!cast<Register>("T"#Index#"_X"),
!cast<Register>("T"#Index#"_Y"),
!cast<Register>("T"#Index#"_Z"),
!cast<Register>("T"#Index#"_W")],
Index>;
+
+ def T#Index#_XY : R600Reg_64 <"T"#Index#"",
+ [!cast<Register>("T"#Index#"_X"),
+ !cast<Register>("T"#Index#"_Y")],
+ Index>;
}
// KCACHE_BANK0
foreach Index = 159-128 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
// 32-bit Temporary Registers
- def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#Index#"-128]."#Chan, Index, Chan>;
+ def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#!add(Index,-128)#"]."#Chan, Index, Chan>;
}
// 128-bit Temporary Registers
- def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#Index#"-128].XYZW",
+ def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#!add(Index, -128)#"].XYZW",
[!cast<Register>("KC0_"#Index#"_X"),
!cast<Register>("KC0_"#Index#"_Y"),
!cast<Register>("KC0_"#Index#"_Z"),
@@ -62,10 +73,10 @@ foreach Index = 159-128 in {
foreach Index = 191-160 in {
foreach Chan = [ "X", "Y", "Z", "W" ] in {
// 32-bit Temporary Registers
- def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#Index#"-160]."#Chan, Index, Chan>;
+ def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#!add(Index,-160)#"]."#Chan, Index, Chan>;
}
// 128-bit Temporary Registers
- def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#Index#"-160].XYZW",
+ def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#!add(Index, -160)#"].XYZW",
[!cast<Register>("KC1_"#Index#"_X"),
!cast<Register>("KC1_"#Index#"_Y"),
!cast<Register>("KC1_"#Index#"_Z"),
@@ -82,6 +93,12 @@ foreach Index = 448-480 in {
// Special Registers
+def OQA : R600Reg<"OQA", 219>;
+def OQB : R600Reg<"OQB", 220>;
+def OQAP : R600Reg<"OQAP", 221>;
+def OQBP : R600Reg<"OQBP", 222>;
+def LDS_DIRECT_A : R600Reg<"LDS_DIRECT_A", 223>;
+def LDS_DIRECT_B : R600Reg<"LDS_DIRECT_B", 224>;
def ZERO : R600Reg<"0.0", 248>;
def ONE : R600Reg<"1.0", 249>;
def NEG_ONE : R600Reg<"-1.0", 249>;
@@ -92,10 +109,11 @@ def ALU_LITERAL_X : R600RegWithChan<"literal.x", 253, "X">;
def ALU_LITERAL_Y : R600RegWithChan<"literal.y", 253, "Y">;
def ALU_LITERAL_Z : R600RegWithChan<"literal.z", 253, "Z">;
def ALU_LITERAL_W : R600RegWithChan<"literal.w", 253, "W">;
-def PV_X : R600RegWithChan<"PV.x", 254, "X">;
-def PV_Y : R600RegWithChan<"PV.y", 254, "Y">;
-def PV_Z : R600RegWithChan<"PV.z", 254, "Z">;
-def PV_W : R600RegWithChan<"PV.w", 254, "W">;
+def PV_X : R600RegWithChan<"PV.X", 254, "X">;
+def PV_Y : R600RegWithChan<"PV.Y", 254, "Y">;
+def PV_Z : R600RegWithChan<"PV.Z", 254, "Z">;
+def PV_W : R600RegWithChan<"PV.W", 254, "W">;
+def PS: R600Reg<"PS", 255>;
def PREDICATE_BIT : R600Reg<"PredicateBit", 0>;
def PRED_SEL_OFF: R600Reg<"Pred_sel_off", 0>;
def PRED_SEL_ZERO : R600Reg<"Pred_sel_zero", 2>;
@@ -115,7 +133,8 @@ let isAllocatable = 0 in {
// XXX: Only use the X channel, until we support wider stack widths
def R600_Addr : RegisterClass <"AMDGPU", [i32], 127, (add (sequence "Addr%u_X", 0, 127))>;
-} // End isAllocatable = 0
+def R600_LDS_SRC_REG : RegisterClass<"AMDGPU", [i32], 32,
+ (add OQA, OQB, OQAP, OQBP, LDS_DIRECT_A, LDS_DIRECT_B)>;
def R600_KC0_X : RegisterClass <"AMDGPU", [f32, i32], 32,
(add (sequence "KC0_%u_X", 128, 159))>;
@@ -149,6 +168,8 @@ def R600_KC1 : RegisterClass <"AMDGPU", [f32, i32], 32,
(interleave R600_KC1_X, R600_KC1_Y,
R600_KC1_Z, R600_KC1_W)>;
+} // End isAllocatable = 0
+
def R600_TReg32_X : RegisterClass <"AMDGPU", [f32, i32], 32,
(add (sequence "T%u_X", 0, 127), AR_X)>;
@@ -169,8 +190,9 @@ def R600_Reg32 : RegisterClass <"AMDGPU", [f32, i32], 32, (add
R600_TReg32,
R600_ArrayBase,
R600_Addr,
+ R600_KC0, R600_KC1,
ZERO, HALF, ONE, ONE_INT, PV_X, ALU_LITERAL_X, NEG_ONE, NEG_HALF,
- ALU_CONST, ALU_PARAM
+ ALU_CONST, ALU_PARAM, OQAP
)>;
def R600_Predicate : RegisterClass <"AMDGPU", [i32], 32, (add
@@ -184,32 +206,5 @@ def R600_Reg128 : RegisterClass<"AMDGPU", [v4f32, v4i32], 128,
let CopyCost = -1;
}
-//===----------------------------------------------------------------------===//
-// Register classes for indirect addressing
-//===----------------------------------------------------------------------===//
-
-// Super register for all the Indirect Registers. This register class is used
-// by the REG_SEQUENCE instruction to specify the registers to use for direct
-// reads / writes which may be written / read by an indirect address.
-class IndirectSuper<string n, list<Register> subregs> :
- RegisterWithSubRegs<n, subregs> {
- let Namespace = "AMDGPU";
- let SubRegIndices =
- [sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
- sub8, sub9, sub10, sub11, sub12, sub13, sub14, sub15];
-}
-
-def IndirectSuperReg : IndirectSuper<"Indirect",
- [TRegMem0_X, TRegMem1_X, TRegMem2_X, TRegMem3_X, TRegMem4_X, TRegMem5_X,
- TRegMem6_X, TRegMem7_X, TRegMem8_X, TRegMem9_X, TRegMem10_X, TRegMem11_X,
- TRegMem12_X, TRegMem13_X, TRegMem14_X, TRegMem15_X]
->;
-
-def IndirectReg : RegisterClass<"AMDGPU", [f32, i32], 32, (add IndirectSuperReg)>;
-
-// This register class defines the registers that are the storage units for
-// the "Indirect Addressing" pseudo memory space.
-// XXX: Only use the X channel, until we support wider stack widths
-def TRegMem : RegisterClass<"AMDGPU", [f32, i32], 32,
- (add (sequence "TRegMem%u_X", 0, 16))
->;
+def R600_Reg64 : RegisterClass<"AMDGPU", [v2f32, v2i32], 64,
+ (add (sequence "T%u_XY", 0, 63))>;
diff --git a/lib/Target/R600/R600Schedule.td b/lib/Target/R600/R600Schedule.td
index 78a460a..df62bf8 100644
--- a/lib/Target/R600/R600Schedule.td
+++ b/lib/Target/R600/R600Schedule.td
@@ -23,14 +23,16 @@ def TRANS : FuncUnit;
def AnyALU : InstrItinClass;
def VecALU : InstrItinClass;
def TransALU : InstrItinClass;
+def XALU : InstrItinClass;
def R600_VLIW5_Itin : ProcessorItineraries <
[ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS, ALU_NULL],
[],
[
InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W, TRANS]>]>,
- InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>,
+ InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
InstrItinData<TransALU, [InstrStage<1, [TRANS]>]>,
+ InstrItinData<XALU, [InstrStage<1, [ALU_X]>]>,
InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
]
>;
@@ -40,7 +42,7 @@ def R600_VLIW4_Itin : ProcessorItineraries <
[],
[
InstrItinData<AnyALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
- InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_X, ALU_W]>]>,
+ InstrItinData<VecALU, [InstrStage<1, [ALU_X, ALU_Y, ALU_Z, ALU_W]>]>,
InstrItinData<TransALU, [InstrStage<1, [ALU_NULL]>]>,
InstrItinData<NullALU, [InstrStage<1, [ALU_NULL]>]>
]
diff --git a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
new file mode 100644
index 0000000..3258894
--- /dev/null
+++ b/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
@@ -0,0 +1,303 @@
+//===-- R600TextureIntrinsicsReplacer.cpp ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass translates TGSI-like texture intrinsics into R600 texture
+/// intrinsics that are closer to the hardware.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/InstVisitor.h"
+
+using namespace llvm;
+
+namespace {
+class R600TextureIntrinsicsReplacer :
+ public FunctionPass, public InstVisitor<R600TextureIntrinsicsReplacer> {
+ static char ID;
+
+ Module *Mod;
+ Type *FloatType;
+ Type *Int32Type;
+ Type *V4f32Type;
+ Type *V4i32Type;
+ FunctionType *TexSign;
+ FunctionType *TexQSign;
+
+ void getAdjustmentFromTextureTarget(unsigned TextureType, bool hasLOD,
+ unsigned SrcSelect[4], unsigned CT[4],
+ bool &useShadowVariant) {
+ enum TextureTypes {
+ TEXTURE_1D = 1,
+ TEXTURE_2D,
+ TEXTURE_3D,
+ TEXTURE_CUBE,
+ TEXTURE_RECT,
+ TEXTURE_SHADOW1D,
+ TEXTURE_SHADOW2D,
+ TEXTURE_SHADOWRECT,
+ TEXTURE_1D_ARRAY,
+ TEXTURE_2D_ARRAY,
+ TEXTURE_SHADOW1D_ARRAY,
+ TEXTURE_SHADOW2D_ARRAY,
+ TEXTURE_SHADOWCUBE,
+ TEXTURE_2D_MSAA,
+ TEXTURE_2D_ARRAY_MSAA,
+ TEXTURE_CUBE_ARRAY,
+ TEXTURE_SHADOWCUBE_ARRAY
+ };
+
+ switch (TextureType) {
+ case 0:
+ useShadowVariant = false;
+ return;
+ case TEXTURE_RECT:
+ case TEXTURE_1D:
+ case TEXTURE_2D:
+ case TEXTURE_3D:
+ case TEXTURE_CUBE:
+ case TEXTURE_1D_ARRAY:
+ case TEXTURE_2D_ARRAY:
+ case TEXTURE_CUBE_ARRAY:
+ case TEXTURE_2D_MSAA:
+ case TEXTURE_2D_ARRAY_MSAA:
+ useShadowVariant = false;
+ break;
+ case TEXTURE_SHADOW1D:
+ case TEXTURE_SHADOW2D:
+ case TEXTURE_SHADOWRECT:
+ case TEXTURE_SHADOW1D_ARRAY:
+ case TEXTURE_SHADOW2D_ARRAY:
+ case TEXTURE_SHADOWCUBE:
+ case TEXTURE_SHADOWCUBE_ARRAY:
+ useShadowVariant = true;
+ break;
+ default:
+ llvm_unreachable("Unknow Texture Type");
+ }
+
+ if (TextureType == TEXTURE_RECT ||
+ TextureType == TEXTURE_SHADOWRECT) {
+ CT[0] = 0;
+ CT[1] = 0;
+ }
+
+ if (TextureType == TEXTURE_CUBE_ARRAY ||
+ TextureType == TEXTURE_SHADOWCUBE_ARRAY)
+ CT[2] = 0;
+
+ if (TextureType == TEXTURE_1D_ARRAY ||
+ TextureType == TEXTURE_SHADOW1D_ARRAY) {
+ if (hasLOD && useShadowVariant) {
+ CT[1] = 0;
+ } else {
+ CT[2] = 0;
+ SrcSelect[2] = 1;
+ }
+ } else if (TextureType == TEXTURE_2D_ARRAY ||
+ TextureType == TEXTURE_SHADOW2D_ARRAY) {
+ CT[2] = 0;
+ }
+
+ if ((TextureType == TEXTURE_SHADOW1D ||
+ TextureType == TEXTURE_SHADOW2D ||
+ TextureType == TEXTURE_SHADOWRECT ||
+ TextureType == TEXTURE_SHADOW1D_ARRAY) &&
+ !(hasLOD && useShadowVariant))
+ SrcSelect[3] = 2;
+ }
+
+ void ReplaceCallInst(CallInst &I, FunctionType *FT, const char *Name,
+ unsigned SrcSelect[4], Value *Offset[3], Value *Resource,
+ Value *Sampler, unsigned CT[4], Value *Coord) {
+ IRBuilder<> Builder(&I);
+ Constant *Mask[] = {
+ ConstantInt::get(Int32Type, SrcSelect[0]),
+ ConstantInt::get(Int32Type, SrcSelect[1]),
+ ConstantInt::get(Int32Type, SrcSelect[2]),
+ ConstantInt::get(Int32Type, SrcSelect[3])
+ };
+ Value *SwizzleMask = ConstantVector::get(Mask);
+ Value *SwizzledCoord =
+ Builder.CreateShuffleVector(Coord, Coord, SwizzleMask);
+
+ Value *Args[] = {
+ SwizzledCoord,
+ Offset[0],
+ Offset[1],
+ Offset[2],
+ Resource,
+ Sampler,
+ ConstantInt::get(Int32Type, CT[0]),
+ ConstantInt::get(Int32Type, CT[1]),
+ ConstantInt::get(Int32Type, CT[2]),
+ ConstantInt::get(Int32Type, CT[3])
+ };
+
+ Function *F = Mod->getFunction(Name);
+ if (!F) {
+ F = Function::Create(FT, GlobalValue::ExternalLinkage, Name, Mod);
+ F->addFnAttr(Attribute::ReadNone);
+ }
+ I.replaceAllUsesWith(Builder.CreateCall(F, Args));
+ I.eraseFromParent();
+ }
+
+ void ReplaceTexIntrinsic(CallInst &I, bool hasLOD, FunctionType *FT,
+ const char *VanillaInt,
+ const char *ShadowInt) {
+ Value *Coord = I.getArgOperand(0);
+ Value *ResourceId = I.getArgOperand(1);
+ Value *SamplerId = I.getArgOperand(2);
+
+ unsigned TextureType =
+      cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+
+ unsigned SrcSelect[4] = { 0, 1, 2, 3 };
+ unsigned CT[4] = {1, 1, 1, 1};
+ Value *Offset[3] = {
+ ConstantInt::get(Int32Type, 0),
+ ConstantInt::get(Int32Type, 0),
+ ConstantInt::get(Int32Type, 0)
+ };
+ bool useShadowVariant;
+
+ getAdjustmentFromTextureTarget(TextureType, hasLOD, SrcSelect, CT,
+ useShadowVariant);
+
+    ReplaceCallInst(I, FT, useShadowVariant ? ShadowInt : VanillaInt, SrcSelect,
+ Offset, ResourceId, SamplerId, CT, Coord);
+ }
+
+ void ReplaceTXF(CallInst &I) {
+ Value *Coord = I.getArgOperand(0);
+ Value *ResourceId = I.getArgOperand(4);
+ Value *SamplerId = I.getArgOperand(5);
+
+ unsigned TextureType =
+      cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
+
+ unsigned SrcSelect[4] = { 0, 1, 2, 3 };
+ unsigned CT[4] = {1, 1, 1, 1};
+ Value *Offset[3] = {
+ I.getArgOperand(1),
+ I.getArgOperand(2),
+ I.getArgOperand(3),
+ };
+ bool useShadowVariant;
+
+ getAdjustmentFromTextureTarget(TextureType, false, SrcSelect, CT,
+ useShadowVariant);
+
+ ReplaceCallInst(I, TexQSign, "llvm.R600.txf", SrcSelect,
+ Offset, ResourceId, SamplerId, CT, Coord);
+ }
+
+public:
+ R600TextureIntrinsicsReplacer():
+ FunctionPass(ID) {
+ }
+
+ virtual bool doInitialization(Module &M) {
+ LLVMContext &Ctx = M.getContext();
+ Mod = &M;
+ FloatType = Type::getFloatTy(Ctx);
+ Int32Type = Type::getInt32Ty(Ctx);
+ V4f32Type = VectorType::get(FloatType, 4);
+ V4i32Type = VectorType::get(Int32Type, 4);
+ Type *ArgsType[] = {
+ V4f32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ };
+ TexSign = FunctionType::get(V4f32Type, ArgsType, /*isVarArg=*/false);
+ Type *ArgsQType[] = {
+ V4i32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ Int32Type,
+ };
+ TexQSign = FunctionType::get(V4f32Type, ArgsQType, /*isVarArg=*/false);
+ return false;
+ }
+
+ virtual bool runOnFunction(Function &F) {
+ visit(F);
+ return false;
+ }
+
+ virtual const char *getPassName() const {
+ return "R600 Texture Intrinsics Replacer";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ }
+
+ void visitCallInst(CallInst &I) {
+ if (!I.getCalledFunction())
+ return;
+
+ StringRef Name = I.getCalledFunction()->getName();
+ if (Name == "llvm.AMDGPU.tex") {
+ ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.tex", "llvm.R600.texc");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txl") {
+ ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txl", "llvm.R600.txlc");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txb") {
+ ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txb", "llvm.R600.txbc");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txf") {
+ ReplaceTXF(I);
+ return;
+ }
+ if (Name == "llvm.AMDGPU.txq") {
+ ReplaceTexIntrinsic(I, false, TexQSign, "llvm.R600.txq", "llvm.R600.txq");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.ddx") {
+ ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddx", "llvm.R600.ddx");
+ return;
+ }
+ if (Name == "llvm.AMDGPU.ddy") {
+ ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddy", "llvm.R600.ddy");
+ return;
+ }
+ }
+
+};
+
+char R600TextureIntrinsicsReplacer::ID = 0;
+
+}
+
+FunctionPass *llvm::createR600TextureIntrinsicsReplacer() {
+ return new R600TextureIntrinsicsReplacer();
+}
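
The dispatch in visitCallInst above is essentially a name table: each AMDGPU-level texture intrinsic maps to a vanilla R600 intrinsic plus, where one exists, a shadow-sampler variant that getAdjustmentFromTextureTarget selects. A standalone summary of that mapping, transcribed from the pass above (plain C++; llvm.AMDGPU.txf is handled separately by ReplaceTXF):

#include <cstdio>

struct TexMapping {
  const char *AMDGPUName;   // incoming intrinsic
  const char *VanillaName;  // plain replacement
  const char *ShadowName;   // shadow-sampler replacement (same as the
                            // vanilla one when no shadow variant exists)
  bool HasLOD;              // whether the hasLOD flag is passed as true
};

// Transcribed from R600TextureIntrinsicsReplacer::visitCallInst.
static const TexMapping Mappings[] = {
  { "llvm.AMDGPU.tex", "llvm.R600.tex", "llvm.R600.texc", false },
  { "llvm.AMDGPU.txl", "llvm.R600.txl", "llvm.R600.txlc", true  },
  { "llvm.AMDGPU.txb", "llvm.R600.txb", "llvm.R600.txbc", true  },
  { "llvm.AMDGPU.txq", "llvm.R600.txq", "llvm.R600.txq",  false },
  { "llvm.AMDGPU.ddx", "llvm.R600.ddx", "llvm.R600.ddx",  false },
  { "llvm.AMDGPU.ddy", "llvm.R600.ddy", "llvm.R600.ddy",  false },
};

int main() {
  for (unsigned i = 0; i < sizeof(Mappings) / sizeof(Mappings[0]); ++i)
    printf("%s -> %s / %s\n", Mappings[i].AMDGPUName,
           Mappings[i].VanillaName, Mappings[i].ShadowName);
  return 0;
}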
diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/R600/SIAnnotateControlFlow.cpp
index 2477e2a..6bbdf59 100644
--- a/lib/Target/R600/SIAnnotateControlFlow.cpp
+++ b/lib/Target/R600/SIAnnotateControlFlow.cpp
@@ -15,6 +15,8 @@
#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -29,13 +31,13 @@ typedef std::pair<BasicBlock *, Value *> StackEntry;
typedef SmallVector<StackEntry, 16> StackVector;
// Intrinsic names the control flow is annotated with
-static const char *IfIntrinsic = "llvm.SI.if";
-static const char *ElseIntrinsic = "llvm.SI.else";
-static const char *BreakIntrinsic = "llvm.SI.break";
-static const char *IfBreakIntrinsic = "llvm.SI.if.break";
-static const char *ElseBreakIntrinsic = "llvm.SI.else.break";
-static const char *LoopIntrinsic = "llvm.SI.loop";
-static const char *EndCfIntrinsic = "llvm.SI.end.cf";
+static const char *const IfIntrinsic = "llvm.SI.if";
+static const char *const ElseIntrinsic = "llvm.SI.else";
+static const char *const BreakIntrinsic = "llvm.SI.break";
+static const char *const IfBreakIntrinsic = "llvm.SI.if.break";
+static const char *const ElseBreakIntrinsic = "llvm.SI.else.break";
+static const char *const LoopIntrinsic = "llvm.SI.loop";
+static const char *const EndCfIntrinsic = "llvm.SI.end.cf";
class SIAnnotateControlFlow : public FunctionPass {
diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/R600/SIDefines.h
index 716b093..2cbce28 100644
--- a/lib/Target/R600/SIDefines.h
+++ b/lib/Target/R600/SIDefines.h
@@ -11,12 +11,28 @@
#ifndef SIDEFINES_H_
#define SIDEFINES_H_
+namespace SIInstrFlags {
+enum {
+ MIMG = 1 << 3,
+ SMRD = 1 << 4,
+ VOP1 = 1 << 5,
+ VOP2 = 1 << 6,
+ VOP3 = 1 << 7,
+ VOPC = 1 << 8,
+ SALU = 1 << 9
+};
+}
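+
+// Illustrative note (not part of this patch): these flags mirror the TSFlags
+// bits set in SIInstrFormats.td, so an instruction kind can be tested as e.g.
+//   bool IsMIMG = TII->get(Opcode).TSFlags & SIInstrFlags::MIMG;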
+
#define R_00B028_SPI_SHADER_PGM_RSRC1_PS 0x00B028
+#define R_00B02C_SPI_SHADER_PGM_RSRC2_PS 0x00B02C
+#define S_00B02C_EXTRA_LDS_SIZE(x) (((x) & 0xFF) << 8)
#define R_00B128_SPI_SHADER_PGM_RSRC1_VS 0x00B128
#define R_00B228_SPI_SHADER_PGM_RSRC1_GS 0x00B228
#define R_00B848_COMPUTE_PGM_RSRC1 0x00B848
#define S_00B028_VGPRS(x) (((x) & 0x3F) << 0)
#define S_00B028_SGPRS(x) (((x) & 0x0F) << 6)
+#define R_00B84C_COMPUTE_PGM_RSRC2 0x00B84C
+#define S_00B84C_LDS_SIZE(x) (((x) & 0x1FF) << 15)
#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC
#endif // SIDEFINES_H_
diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/R600/SIFixSGPRCopies.cpp
new file mode 100644
index 0000000..3370c79
--- /dev/null
+++ b/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -0,0 +1,263 @@
+//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Copies from VGPR to SGPR registers are illegal and the register coalescer
+/// will sometimes generate these illegal copies in situations like this:
+///
+/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+///
+///
+/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
+/// code will look like this:
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now that the result of the PHI instruction is an SGPR, the register
+/// allocator is forced to constrain the register class of %vreg3 to
+/// <sgpr> so we end up with final code like this:
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now this code contains an illegal copy from a VGPR to an SGPR.
+///
+/// In order to avoid this problem, this pass searches for PHI instructions
+/// which define a <vsrc> register and constrains their definition class to
+/// <vgpr> if a user of the PHI's definition register is a vector instruction.
+/// If the PHI's definition class is constrained to <vgpr> then the coalescer
+/// will be unable to perform the COPY removal from the above example which
+/// ultimately led to the creation of an illegal COPY.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "sgpr-copies"
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+class SIFixSGPRCopies : public MachineFunctionPass {
+
+private:
+ static char ID;
+ const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const;
+ const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const;
+ bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI) const;
+
+public:
+ SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const {
+ return "SI Fix SGPR copies";
+ }
+
+};
+
+} // End anonymous namespace
+
+char SIFixSGPRCopies::ID = 0;
+
+FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
+ return new SIFixSGPRCopies(tm);
+}
+
+static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ if (!MI.getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
+ continue;
+
+ if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
+ return true;
+ }
+ return false;
+}
+
+/// This function walks the use list of Reg until it finds an Instruction
+/// that isn't a COPY, and returns the register class of that instruction.
+/// \return The register class of the first non-COPY instruction.
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const {
+ // The Reg parameter to the function must always be defined by either a PHI
+ // or a COPY, therefore it cannot be a physical register.
+ assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
+ "Reg cannot be a physical register");
+
+ const TargetRegisterClass *RC = MRI.getRegClass(Reg);
+ RC = TRI->getSubRegClass(RC, SubReg);
+ for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
+ E = MRI.use_end(); I != E; ++I) {
+ switch (I->getOpcode()) {
+ case AMDGPU::COPY:
+ RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
+ I->getOperand(0).getReg(),
+ I->getOperand(0).getSubReg()));
+ break;
+ }
+ }
+
+ return RC;
+}
+
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg,
+ unsigned SubReg) const {
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
+ const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
+ return TRI->getSubRegClass(RC, SubReg);
+ }
+ MachineInstr *Def = MRI.getVRegDef(Reg);
+ if (Def->getOpcode() != AMDGPU::COPY) {
+ return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
+ }
+
+ return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
+ Def->getOperand(1).getSubReg());
+}
+
+bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
+ const SIRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI) const {
+
+ unsigned DstReg = Copy.getOperand(0).getReg();
+ unsigned SrcReg = Copy.getOperand(1).getReg();
+ unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
+ const TargetRegisterClass *SrcRC;
+
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+ DstRC == &AMDGPU::M0RegRegClass)
+ return false;
+
+ SrcRC = inferRegClassFromDef(TRI, MRI, SrcReg, SrcSubReg);
+ return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
+}
+
+bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
+ MF.getTarget().getRegisterInfo());
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ MF.getTarget().getInstrInfo());
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+ if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
+ DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
+ DEBUG(MI.print(dbgs()));
+ TII->moveToVALU(MI);
+
+ }
+
+ switch (MI.getOpcode()) {
+ default: continue;
+ case AMDGPU::PHI: {
+ DEBUG(dbgs() << " Fixing PHI:\n");
+ DEBUG(MI.print(dbgs()));
+
+ for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
+ unsigned Reg = MI.getOperand(i).getReg();
+ const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
+ MI.getOperand(0).getSubReg());
+ MRI.constrainRegClass(Reg, RC);
+ }
+ unsigned Reg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
+ MI.getOperand(0).getSubReg());
+ if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
+ MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
+ }
+
+ if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
+ break;
+
+ // If a PHI node defines an SGPR and any of its operands are VGPRs,
+ // then we need to move it to the VALU.
+ for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
+ unsigned Reg = MI.getOperand(i).getReg();
+ if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
+ TII->moveToVALU(MI);
+ break;
+ }
+ }
+
+ break;
+ }
+ case AMDGPU::REG_SEQUENCE: {
+ if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
+ !hasVGPROperands(MI, TRI))
+ continue;
+
+ DEBUG(dbgs() << "Fixing REG_SEQUENCE:\n");
+ DEBUG(MI.print(dbgs()));
+
+ TII->moveToVALU(MI);
+ break;
+ }
+ }
+ }
+ }
+ return false;
+}
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 6bd82a5..d5d2b68 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -13,39 +13,36 @@
//===----------------------------------------------------------------------===//
#include "SIISelLowering.h"
-#include "AMDIL.h"
#include "AMDGPU.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
-#include "llvm/IR/Function.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/IR/Function.h"
+
+const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
using namespace llvm;
SITargetLowering::SITargetLowering(TargetMachine &TM) :
- AMDGPUTargetLowering(TM),
- TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo())),
- TRI(TM.getRegisterInfo()) {
+ AMDGPUTargetLowering(TM) {
addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
- addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
+ addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass);
- addRegisterClass(MVT::v16i8, &AMDGPU::SReg_128RegClass);
addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
- addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
- addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass);
- addRegisterClass(MVT::v1i32, &AMDGPU::VReg_32RegClass);
-
- addRegisterClass(MVT::v2i32, &AMDGPU::VReg_64RegClass);
- addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
+ addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass);
addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
@@ -59,6 +56,21 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
computeRegisterProperties();
+ // Condition Codes
+ setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::f32, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
+
+ setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETULE, MVT::f64, Expand);
+ setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
+
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
@@ -66,14 +78,66 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::ADD, MVT::i64, Legal);
setOperationAction(ISD::ADD, MVT::i32, Legal);
+ setOperationAction(ISD::ADDC, MVT::i32, Legal);
+ setOperationAction(ISD::ADDE, MVT::i32, Legal);
+
+ setOperationAction(ISD::BITCAST, MVT::i128, Legal);
+
+ // We need to custom lower vector stores from local memory
+ setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
+
+ setOperationAction(ISD::STORE, MVT::v8i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v16i32, Custom);
+
+ // We need to custom lower loads/stores from private memory
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
+
+ setOperationAction(ISD::STORE, MVT::i32, Custom);
+ setOperationAction(ISD::STORE, MVT::i64, Custom);
+ setOperationAction(ISD::STORE, MVT::i128, Custom);
+ setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
- setOperationAction(ISD::STORE, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::i64, Custom);
+ setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
+ setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
+
+ setOperationAction(ISD::ANY_EXTEND, MVT::i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom);
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
+
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i32, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+ setTruncStoreAction(MVT::i128, MVT::i64, Expand);
+ setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
+ setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
setTargetDAGCombine(ISD::SELECT_CC);
@@ -82,12 +146,45 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setSchedulingPreference(Sched::RegPressure);
}
+//===----------------------------------------------------------------------===//
+// TargetLowering queries
+//===----------------------------------------------------------------------===//
+
+bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ bool *IsFast) const {
+ // XXX: This depends on the address space and also we may want to revisit
+ // the alignment values we specify in the DataLayout.
+ if (!VT.isSimple() || VT == MVT::Other)
+ return false;
+ return VT.bitsGT(MVT::i32);
+}
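+
+// Rough illustration of the rule above (not patch code): MVT::i32 returns
+// false (32-bit accesses must stay aligned) while MVT::v2i32, being 64 bits
+// wide, returns true.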
+
+bool SITargetLowering::shouldSplitVectorElementType(EVT VT) const {
+ return VT.bitsLE(MVT::i16);
+}
+
+SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
+ SDLoc DL, SDValue Chain,
+ unsigned Offset) const {
+ MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+ PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
+ AMDGPUAS::CONSTANT_ADDRESS);
+ SDValue BasePtr = DAG.getCopyFromReg(Chain, DL,
+ MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
+ DAG.getConstant(Offset, MVT::i64));
+ return DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain, Ptr,
+ MachinePointerInfo(UndefValue::get(PtrTy)), MemVT,
+ false, false, MemVT.getSizeInBits() >> 3);
+
+}
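+
+// Sketch of the sequence LowerParameter builds (illustrative values, not
+// patch code): for an i32 argument at buffer offset 40, with the argument
+// pointer live in SGPR0_SGPR1, it emits roughly
+//   Ptr = ADD i64 (CopyFromReg SGPR0_SGPR1), 40
+//   Arg = SEXTLOAD i32, constant address space, Ptr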
+
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
@@ -103,9 +200,10 @@ SDValue SITargetLowering::LowerFormalArguments(
for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
const ISD::InputArg &Arg = Ins[i];
-
- // First check if it's a PS input addr
- if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg()) {
+
+ // First check if it's a PS input addr
+ if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
+ !Arg.Flags.isByVal()) {
assert((PSInputNum <= 15) && "Too many PS inputs!");
@@ -120,7 +218,7 @@ SDValue SITargetLowering::LowerFormalArguments(
}
// Second, split vertices into their elements
- if (Arg.VT.isVector()) {
+ if (Info->ShaderType != ShaderType::COMPUTE && Arg.VT.isVector()) {
ISD::InputArg NewArg = Arg;
NewArg.Flags.setSplit();
NewArg.VT = Arg.VT.getVectorElementType();
@@ -136,7 +234,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NewArg.PartOffset += NewArg.VT.getStoreSize();
}
- } else {
+ } else if (Info->ShaderType != ShaderType::COMPUTE) {
Splits.push_back(Arg);
}
}
@@ -152,20 +250,44 @@ SDValue SITargetLowering::LowerFormalArguments(
CCInfo.AllocateReg(AMDGPU::VGPR1);
}
+ // The pointer to the list of arguments is stored in SGPR0, SGPR1
+ if (Info->ShaderType == ShaderType::COMPUTE) {
+ CCInfo.AllocateReg(AMDGPU::SGPR0);
+ CCInfo.AllocateReg(AMDGPU::SGPR1);
+ MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
+ }
+
+ if (Info->ShaderType == ShaderType::COMPUTE) {
+ getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
+ Splits);
+ }
+
AnalyzeFormalArguments(CCInfo, Splits);
for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
+ const ISD::InputArg &Arg = Ins[i];
if (Skipped & (1 << i)) {
- InVals.push_back(SDValue());
+ InVals.push_back(DAG.getUNDEF(Arg.VT));
continue;
}
CCValAssign &VA = ArgLocs[ArgIdx++];
+ EVT VT = VA.getLocVT();
+
+ if (VA.isMemLoc()) {
+ VT = Ins[i].VT;
+ EVT MemVT = Splits[i].VT;
+ // The first 36 bytes of the input buffer contain information about
+ // thread group and global sizes.
+ SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
+ 36 + VA.getLocMemOffset());
+ InVals.push_back(Arg);
+ continue;
+ }
assert(VA.isRegLoc() && "Parameter must be in a register!");
unsigned Reg = VA.getLocReg();
- MVT VT = VA.getLocVT();
if (VT == MVT::i64) {
// For now assume it is a pointer
@@ -181,7 +303,6 @@ SDValue SITargetLowering::LowerFormalArguments(
Reg = MF.addLiveIn(Reg, RC);
SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);
- const ISD::InputArg &Arg = Ins[i];
if (Arg.VT.isVector()) {
// Build a vector from the registers
@@ -200,7 +321,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NumElements = Arg.VT.getVectorNumElements() - NumElements;
for (unsigned j = 0; j != NumElements; ++j)
Regs.push_back(DAG.getUNDEF(VT));
-
+
InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT,
Regs.data(), Regs.size()));
continue;
@@ -214,36 +335,274 @@ SDValue SITargetLowering::LowerFormalArguments(
MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MachineInstr * MI, MachineBasicBlock * BB) const {
+ MachineBasicBlock::iterator I = *MI;
+
switch (MI->getOpcode()) {
default:
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case AMDGPU::BRANCH: return BB;
+ case AMDGPU::SI_ADDR64_RSRC: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ unsigned SuperReg = MI->getOperand(0).getReg();
+ unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SubRegHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned SubRegHiHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ unsigned SubRegHiLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), SubRegLo)
+ .addOperand(MI->getOperand(1));
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiLo)
+ .addImm(0);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiHi)
+ .addImm(RSRC_DATA_FORMAT >> 32);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SubRegHi)
+ .addReg(SubRegHiLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(SubRegHiHi)
+ .addImm(AMDGPU::sub1);
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SuperReg)
+ .addReg(SubRegLo)
+ .addImm(AMDGPU::sub0_sub1)
+ .addReg(SubRegHi)
+ .addImm(AMDGPU::sub2_sub3);
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::V_SUB_F64: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64),
+ MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(MI->getOperand(2).getReg())
+ .addImm(0) /* src2 */
+ .addImm(0) /* ABS */
+ .addImm(0) /* CLAMP */
+ .addImm(0) /* OMOD */
+ .addImm(2); /* NEG */
+ MI->eraseFromParent();
+ break;
+ }
+ case AMDGPU::SI_RegisterStorePseudo: {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::SI_RegisterStore),
+ Reg);
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ MI->eraseFromParent();
+ }
}
return BB;
}
-EVT SITargetLowering::getSetCCResultType(EVT VT) const {
- return MVT::i1;
+EVT SITargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector()) {
+ return MVT::i1;
+ }
+ return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
}
MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const {
return MVT::i32;
}
+bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ return false; /* There is V_MAD_F32 for f32 */
+ case MVT::f64:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+ case ISD::ADD: return LowerADD(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
+ case ISD::LOAD: {
+ LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
+ if ((Load->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+ Load->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
+ Op.getValueType().isVector()) {
+ SDValue MergedValues[2] = {
+ SplitVectorLoad(Op, DAG),
+ Load->getChain()
+ };
+ return DAG.getMergeValues(MergedValues, 2, SDLoc(Op));
+ } else {
+ return LowerLOAD(Op, DAG);
+ }
+ }
+
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
+ case ISD::ANY_EXTEND: // Fall-through
+ case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntrinsicID =
+ cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ // XXX: Hard-coded for now; we use only two user SGPRs to store the
+ // pointer to the parameters.
+ unsigned NumUserSGPRs = 2;
+ switch (IntrinsicID) {
+ default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
+ case Intrinsic::r600_read_ngroups_x:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0);
+ case Intrinsic::r600_read_ngroups_y:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4);
+ case Intrinsic::r600_read_ngroups_z:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8);
+ case Intrinsic::r600_read_global_size_x:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12);
+ case Intrinsic::r600_read_global_size_y:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16);
+ case Intrinsic::r600_read_global_size_z:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20);
+ case Intrinsic::r600_read_local_size_x:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24);
+ case Intrinsic::r600_read_local_size_y:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28);
+ case Intrinsic::r600_read_local_size_z:
+ return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32);
+ case Intrinsic::r600_read_tgid_x:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
+ case Intrinsic::r600_read_tgid_y:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT);
+ case Intrinsic::r600_read_tgid_z:
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
+ AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT);
+ case Intrinsic::r600_read_tidig_x:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR0, VT);
+ case Intrinsic::r600_read_tidig_y:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR1, VT);
+ case Intrinsic::r600_read_tidig_z:
+ return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
+ AMDGPU::VGPR2, VT);
+ case AMDGPUIntrinsic::SI_load_const: {
+ SDValue Ops [] = {
+ ResourceDescriptorToi128(Op.getOperand(1), DAG),
+ Op.getOperand(2)
+ };
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(),
+ MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
+ VT.getSizeInBits() / 8, 4);
+ return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
+ Op->getVTList(), Ops, 2, VT, MMO);
+ }
+ case AMDGPUIntrinsic::SI_sample:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
+ case AMDGPUIntrinsic::SI_sampleb:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG);
+ case AMDGPUIntrinsic::SI_sampled:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG);
+ case AMDGPUIntrinsic::SI_samplel:
+ return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
+ case AMDGPUIntrinsic::SI_vs_load_input:
+ return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
+ ResourceDescriptorToi128(Op.getOperand(1), DAG),
+ Op.getOperand(2),
+ Op.getOperand(3));
+ }
+ }
+
+ case ISD::INTRINSIC_VOID:
+ SDValue Chain = Op.getOperand(0);
+ unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+
+ switch (IntrinsicID) {
+ case AMDGPUIntrinsic::SI_tbuffer_store: {
+ SDLoc DL(Op);
+ SDValue Ops [] = {
+ Chain,
+ ResourceDescriptorToi128(Op.getOperand(2), DAG),
+ Op.getOperand(3),
+ Op.getOperand(4),
+ Op.getOperand(5),
+ Op.getOperand(6),
+ Op.getOperand(7),
+ Op.getOperand(8),
+ Op.getOperand(9),
+ Op.getOperand(10),
+ Op.getOperand(11),
+ Op.getOperand(12),
+ Op.getOperand(13),
+ Op.getOperand(14)
+ };
+ EVT VT = Op.getOperand(3).getValueType();
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(),
+ MachineMemOperand::MOStore,
+ VT.getSizeInBits() / 8, 4);
+ return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
+ Op->getVTList(), Ops,
+ sizeof(Ops)/sizeof(Ops[0]), VT, MMO);
+ }
+ default:
+ break;
+ }
}
return SDValue();
}
+SDValue SITargetLowering::LowerADD(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (Op.getValueType() != MVT::i64)
+ return SDValue();
+
+ SDLoc DL(Op);
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+
+ SDValue Zero = DAG.getConstant(0, MVT::i32);
+ SDValue One = DAG.getConstant(1, MVT::i32);
+
+ SDValue Lo0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, Zero);
+ SDValue Hi0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS, One);
+
+ SDValue Lo1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, Zero);
+ SDValue Hi1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS, One);
+
+ SDVTList VTList = DAG.getVTList(MVT::i32, MVT::Glue);
+
+ SDValue AddLo = DAG.getNode(ISD::ADDC, DL, VTList, Lo0, Lo1);
+ SDValue Carry = AddLo.getValue(1);
+ SDValue AddHi = DAG.getNode(ISD::ADDE, DL, VTList, Hi0, Hi1, Carry);
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, AddLo, AddHi.getValue(0));
+}
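+
+// Worked example (illustrative): for LHS = 0x1FFFFFFFF and RHS = 1 the
+// lowering above computes
+//   AddLo = ADDC(0xFFFFFFFF, 0x1)  -> 0x0, carry set
+//   AddHi = ADDE(0x1, 0x0, carry)  -> 0x2
+//   BUILD_PAIR(AddLo, AddHi)       -> 0x200000000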
+
/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
@@ -265,7 +624,7 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) {
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SelectionDAG &DAG) const {
- DebugLoc DL = BRCOND.getDebugLoc();
+ SDLoc DL(BRCOND);
SDNode *Intr = BRCOND.getOperand(1).getNode();
SDValue Target = BRCOND.getOperand(2);
@@ -338,30 +697,51 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
return Chain;
}
-#define RSRC_DATA_FORMAT 0xf00000000000
-
-SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue Value = Op.getOperand(1);
- SDValue VirtualAddress = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
- if (StoreNode->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) {
+ if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
return SDValue();
- }
- SDValue SrcSrc = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
- DAG.getConstant(0, MVT::i64),
- DAG.getConstant(RSRC_DATA_FORMAT, MVT::i64));
+ SDValue TruncPtr = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ Load->getBasePtr(), DAG.getConstant(0, MVT::i32));
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
+ DAG.getConstant(2, MVT::i32));
+
+ SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
+ Load->getChain(), Ptr,
+ DAG.getTargetConstant(0, MVT::i32),
+ Op.getOperand(2));
+ SDValue MergedValues[2] = {
+ Ret,
+ Load->getChain()
+ };
+ return DAG.getMergeValues(MergedValues, 2, DL);
+
+}
+
+SDValue SITargetLowering::ResourceDescriptorToi128(SDValue Op,
+ SelectionDAG &DAG) const {
- SDValue Ops[2];
- Ops[0] = DAG.getNode(AMDGPUISD::BUFFER_STORE, DL, MVT::Other, Chain,
- Value, SrcSrc, VirtualAddress);
- Ops[1] = Chain;
+ if (Op.getValueType() == MVT::i128) {
+ return Op;
+ }
- return DAG.getMergeValues(Ops, 2, DL);
+ assert(Op.getOpcode() == ISD::UNDEF);
+ return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), MVT::i128,
+ DAG.getConstant(0, MVT::i64),
+ DAG.getConstant(0, MVT::i64));
+}
+
+SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
+ const SDValue &Op,
+ SelectionDAG &DAG) const {
+ return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
+ Op.getOperand(2),
+ ResourceDescriptorToi128(Op.getOperand(3), DAG),
+ Op.getOperand(4));
}
SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
@@ -371,7 +751,7 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue False = Op.getOperand(3);
SDValue CC = Op.getOperand(4);
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Possible Min/Max pattern
SDValue MinMax = LowerMinMax(Op, DAG);
@@ -383,6 +763,84 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}
+SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ if (VT != MVT::i64) {
+ return SDValue();
+ }
+
+ SDValue Hi = DAG.getNode(ISD::SRA, DL, MVT::i32, Op.getOperand(0),
+ DAG.getConstant(31, MVT::i32));
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi);
+}
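+
+// Illustrative example: sign-extending the i32 value 0x80000000 gives
+// Hi = SRA(0x80000000, 31) = 0xFFFFFFFF, so the i64 result is
+// BUILD_PAIR(0x80000000, 0xFFFFFFFF) = 0xFFFFFFFF80000000.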
+
+SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT VT = Store->getMemoryVT();
+
+ SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
+ if (Ret.getNode())
+ return Ret;
+
+ if (VT.isVector() && VT.getVectorNumElements() >= 8)
+ return SplitVectorStore(Op, DAG);
+
+ if (Store->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
+ return SDValue();
+
+ SDValue TruncPtr = DAG.getZExtOrTrunc(Store->getBasePtr(), DL, MVT::i32);
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, TruncPtr,
+ DAG.getConstant(2, MVT::i32));
+ SDValue Chain = Store->getChain();
+ SmallVector<SDValue, 8> Values;
+
+ if (VT == MVT::i64) {
+ for (unsigned i = 0; i < 2; ++i) {
+ Values.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ Store->getValue(), DAG.getConstant(i, MVT::i32)));
+ }
+ } else if (VT == MVT::i128) {
+ for (unsigned i = 0; i < 2; ++i) {
+ for (unsigned j = 0; j < 2; ++j) {
+ Values.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
+ DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64,
+ Store->getValue(), DAG.getConstant(i, MVT::i32)),
+ DAG.getConstant(j, MVT::i32)));
+ }
+ }
+ } else {
+ Values.push_back(Store->getValue());
+ }
+
+ for (unsigned i = 0; i < Values.size(); ++i) {
+ SDValue PartPtr = DAG.getNode(ISD::ADD, DL, MVT::i32,
+ Ptr, DAG.getConstant(i, MVT::i32));
+ Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
+ Chain, Values[i], PartPtr,
+ DAG.getTargetConstant(0, MVT::i32));
+ }
+ return Chain;
+}
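+
+// Illustration of the private-memory path above (not patch code): an i64
+// store to byte address A becomes two 32-bit REGISTER_STOREs at dword
+// indices (A >> 2) and (A >> 2) + 1, since Ptr was shifted right by 2.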
+
+
+SDValue SITargetLowering::LowerZERO_EXTEND(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ if (VT != MVT::i64) {
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0),
+ DAG.getConstant(0, MVT::i32));
+}
+
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
@@ -390,13 +848,12 @@ SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
switch (N->getOpcode()) {
default: break;
case ISD::SELECT_CC: {
- N->dump();
ConstantSDNode *True, *False;
// i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
@@ -433,13 +890,13 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
-/// \brief Test if RegClass is one of the VSrc classes
+/// \brief Test if RegClass is one of the VSrc classes
static bool isVSrc(unsigned RegClass) {
return AMDGPU::VSrc_32RegClassID == RegClass ||
AMDGPU::VSrc_64RegClassID == RegClass;
}
-/// \brief Test if RegClass is one of the SSrc classes
+/// \brief Test if RegClass is one of the SSrc classes
static bool isSSrc(unsigned RegClass) {
return AMDGPU::SSrc_32RegClassID == RegClass ||
AMDGPU::SSrc_64RegClassID == RegClass;
@@ -481,6 +938,8 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
bool &ScalarSlotUsed) const {
MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
return false;
@@ -512,30 +971,67 @@ bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
return false;
}
+const TargetRegisterClass *SITargetLowering::getRegClassForNode(
+ SelectionDAG &DAG, const SDValue &Op) const {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ const SIRegisterInfo &TRI = TII->getRegisterInfo();
+
+ if (!Op->isMachineOpcode()) {
+ switch(Op->getOpcode()) {
+ case ISD::CopyFromReg: {
+ MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
+ unsigned Reg = cast<RegisterSDNode>(Op->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ return MRI.getRegClass(Reg);
+ }
+ return TRI.getPhysRegClass(Reg);
+ }
+ default: return NULL;
+ }
+ }
+ const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode());
+ int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
+ if (OpClassID != -1) {
+ return TRI.getRegClass(OpClassID);
+ }
+ switch(Op.getMachineOpcode()) {
+ case AMDGPU::COPY_TO_REGCLASS:
+ // Operand 1 is the register class id for COPY_TO_REGCLASS instructions.
+ OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+
+ // If the COPY_TO_REGCLASS instruction is copying to a VSrc register
+ // class, then the register class for the value could be either a
+ // VReg or an SReg. In order to get a more accurate register class, we
+ // recurse on the node that defines the value.
+ if (OpClassID == AMDGPU::VSrc_32RegClassID ||
+ OpClassID == AMDGPU::VSrc_64RegClassID) {
+ return getRegClassForNode(DAG, Op.getOperand(0));
+ }
+ return TRI.getRegClass(OpClassID);
+ case AMDGPU::EXTRACT_SUBREG: {
+ int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ const TargetRegisterClass *SuperClass =
+ getRegClassForNode(DAG, Op.getOperand(0));
+ return TRI.getSubClassWithSubReg(SuperClass, SubIdx);
+ }
+ case AMDGPU::REG_SEQUENCE:
+ // Operand 0 is the register class id for REG_SEQUENCE instructions.
+ return TRI.getRegClass(
+ cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue());
+ default:
+ return getRegClassFor(Op.getSimpleValueType());
+ }
+}
+
/// \brief Does "Op" fit into register class "RegClass" ?
-bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, SDValue &Op,
+bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
unsigned RegClass) const {
-
- MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
- SDNode *Node = Op.getNode();
-
- const TargetRegisterClass *OpClass;
- if (MachineSDNode *MN = dyn_cast<MachineSDNode>(Node)) {
- const MCInstrDesc &Desc = TII->get(MN->getMachineOpcode());
- int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
- if (OpClassID == -1)
- OpClass = getRegClassFor(Op.getSimpleValueType());
- else
- OpClass = TRI->getRegClass(OpClassID);
-
- } else if (Node->getOpcode() == ISD::CopyFromReg) {
- RegisterSDNode *Reg = cast<RegisterSDNode>(Node->getOperand(1).getNode());
- OpClass = MRI.getRegClass(Reg->getReg());
-
- } else
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterClass *RC = getRegClassForNode(DAG, Op);
+ if (!RC) {
return false;
-
- return TRI->getRegClass(RegClass)->hasSubClassEq(OpClass);
+ }
+ return TRI->getRegClass(RegClass)->hasSubClassEq(RC);
}
/// \brief Make sure that we don't exceed the number of allowed scalars
@@ -561,20 +1057,33 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
return;
}
- // This is a conservative aproach, it is possible that we can't determine
- // the correct register class and copy too often, but better save than sorry.
+ // This is a conservative approach. It is possible that we can't determine the
+ // correct register class and copy too often, but better safe than sorry.
SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
- SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DebugLoc(),
+ SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(),
Operand.getValueType(), Operand, RC);
Operand = SDValue(Node, 0);
}
+/// \returns true if \p Node's operands are different from the SDValue list
+/// \p Ops
+static bool isNodeChanged(const SDNode *Node, const std::vector<SDValue> &Ops) {
+ for (unsigned i = 0, e = Node->getNumOperands(); i < e; ++i) {
+ if (Ops[i].getNode() != Node->getOperand(i).getNode()) {
+ return true;
+ }
+ }
+ return false;
+}
+
/// \brief Try to fold the Node's operands into the Node
SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
SelectionDAG &DAG) const {
// Original encoding (either e32 or e64)
int Opcode = Node->getMachineOpcode();
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
const MCInstrDesc *Desc = &TII->get(Opcode);
unsigned NumDefs = Desc->getNumDefs();
@@ -700,13 +1209,19 @@ SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
Ops.push_back(Node->getOperand(i));
+ // Nodes that have a glue result are not CSE'd by getMachineNode(), so in
+ // this case a brand new node is always created, even if the operands
+ // are the same as before. So, manually check if anything has been changed.
+ if (Desc->Opcode == Opcode && !isNodeChanged(Node, Ops)) {
+ return Node;
+ }
+
// Create a complete new instruction
- return DAG.getMachineNode(Desc->Opcode, Node->getDebugLoc(),
- Node->getVTList(), Ops);
+ return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops);
}
/// \brief Helper function for adjustWritemask
-unsigned SubIdx2Lane(unsigned Idx) {
+static unsigned SubIdx2Lane(unsigned Idx) {
switch (Idx) {
default: return 0;
case AMDGPU::sub0: return 0;
@@ -720,7 +1235,9 @@ unsigned SubIdx2Lane(unsigned Idx) {
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
SelectionDAG &DAG) const {
SDNode *Users[4] = { };
- unsigned Writemask = 0, Lane = 0;
+ unsigned Lane = 0;
+ unsigned OldDmask = Node->getConstantOperandVal(0);
+ unsigned NewDmask = 0;
// Try to figure out the used register components
for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
@@ -731,32 +1248,45 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
return;
+ // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
+ // Note that subregs are packed, i.e. Lane==0 is the first bit set
+ // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
+ // set, etc.
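+ // For example (illustrative): with OldDmask == 0b1010, i.e. Y and W
+ // enabled, Lane == 0 refers to Y and Lane == 1 refers to W; the loop
+ // below recovers the matching texture component from the old dmask.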
Lane = SubIdx2Lane(I->getConstantOperandVal(1));
+ // Set which texture component corresponds to the lane.
+ unsigned Comp;
+ for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
+ assert(Dmask);
+ Comp = countTrailingZeros(Dmask);
+ Dmask &= ~(1 << Comp);
+ }
+
// Abort if we have more than one user per component
if (Users[Lane])
return;
Users[Lane] = *I;
- Writemask |= 1 << Lane;
+ NewDmask |= 1 << Comp;
}
- // Abort if all components are used
- if (Writemask == 0xf)
+ // Abort if there's no change
+ if (NewDmask == OldDmask)
return;
// Adjust the writemask in the node
std::vector<SDValue> Ops;
- Ops.push_back(DAG.getTargetConstant(Writemask, MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32));
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
Ops.push_back(Node->getOperand(i));
Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
// If we only got one lane, replace it with a copy
- if (Writemask == (1U << Lane)) {
+ // (if NewDmask has only one bit set...)
+ if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32);
SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
- DebugLoc(), Users[Lane]->getValueType(0),
+ SDLoc(), Users[Lane]->getValueType(0),
SDValue(Node, 0), RC);
DAG.ReplaceAllUsesWith(Users[Lane], Copy);
return;
@@ -784,8 +1314,11 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
/// \brief Fold the instructions after selecting them
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ Node = AdjustRegClass(Node, DAG);
- if (AMDGPU::isMIMG(Node->getMachineOpcode()) != -1)
+ if (TII->isMIMG(Node->getMachineOpcode()))
adjustWritemask(Node, DAG);
return foldOperands(Node, DAG);
@@ -795,7 +1328,9 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
SDNode *Node) const {
- if (AMDGPU::isMIMG(MI->getOpcode()) == -1)
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
+ if (!TII->isMIMG(MI->getOpcode()))
return;
unsigned VReg = MI->getOperand(0).getReg();
@@ -812,6 +1347,53 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
case 3: RC = &AMDGPU::VReg_96RegClass; break;
}
+ unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
+ MI->setDesc(TII->get(NewOpcode));
MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
MRI.setRegClass(VReg, RC);
}
+
+MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
+ SelectionDAG &DAG) const {
+
+ SDLoc DL(N);
+ unsigned NewOpcode = N->getMachineOpcode();
+
+ switch (N->getMachineOpcode()) {
+ default: return N;
+ case AMDGPU::S_LOAD_DWORD_IMM:
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
+ // Fall-through
+ case AMDGPU::S_LOAD_DWORDX2_SGPR:
+ if (NewOpcode == N->getMachineOpcode()) {
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
+ }
+ // Fall-through
+ case AMDGPU::S_LOAD_DWORDX4_IMM:
+ case AMDGPU::S_LOAD_DWORDX4_SGPR: {
+ if (NewOpcode == N->getMachineOpcode()) {
+ NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
+ }
+ if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) {
+ return N;
+ }
+ ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1));
+ SDValue Ops[] = {
+ SDValue(DAG.getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::i128,
+ DAG.getConstant(0, MVT::i64)), 0),
+ N->getOperand(0),
+ DAG.getConstant(Offset->getSExtValue() << 2, MVT::i32)
+ };
+ return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops);
+ }
+ }
+}
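+
+// Illustrative example (not patch code): an S_LOAD_DWORDX4_IMM whose base
+// pointer cannot be placed in SReg_64 is rewritten above into
+//   BUFFER_LOAD_DWORDX4_ADDR64 (SI_ADDR64_RSRC 0), ptr, (offset << 2)
+// with the immediate offset scaled from dwords to bytes.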
+
+SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
+ const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const {
+ SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);
+
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
+ cast<RegisterSDNode>(VReg)->getReg(), VT);
+}
diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/R600/SIISelLowering.h
index de637be..9933ece 100644
--- a/lib/Target/R600/SIISelLowering.h
+++ b/lib/Target/R600/SIISelLowering.h
@@ -21,35 +21,48 @@
namespace llvm {
class SITargetLowering : public AMDGPUTargetLowering {
- const SIInstrInfo * TII;
- const TargetRegisterInfo * TRI;
-
- SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
+ SDValue Chain, unsigned Offset) const;
+ SDValue LowerSampleIntrinsic(unsigned Opcode, const SDValue &Op,
+ SelectionDAG &DAG) const;
+ SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue ResourceDescriptorToi128(SDValue Op, SelectionDAG &DAG) const;
bool foldImm(SDValue &Operand, int32_t &Immediate,
bool &ScalarSlotUsed) const;
- bool fitsRegClass(SelectionDAG &DAG, SDValue &Op, unsigned RegClass) const;
- void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
+ const TargetRegisterClass *getRegClassForNode(SelectionDAG &DAG,
+ const SDValue &Op) const;
+ bool fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
+ unsigned RegClass) const;
+ void ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
unsigned RegClass, bool &ScalarSlotUsed) const;
SDNode *foldOperands(MachineSDNode *N, SelectionDAG &DAG) const;
void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
+ MachineSDNode *AdjustRegClass(MachineSDNode *N, SelectionDAG &DAG) const;
public:
SITargetLowering(TargetMachine &tm);
+ bool allowsUnalignedMemoryAccesses(EVT VT, bool *IsFast) const;
+ virtual bool shouldSplitVectorElementType(EVT VT) const;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
MachineBasicBlock * BB) const;
- virtual EVT getSetCCResultType(EVT VT) const;
+ virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
virtual MVT getScalarShiftAmountTy(EVT VT) const;
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const;
@@ -57,6 +70,8 @@ public:
SDNode *Node) const;
int32_t analyzeImmediate(const SDNode *N) const;
+ SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC,
+ unsigned Reg, EVT VT) const;
};
} // End namespace llvm
diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/R600/SIInsertWaits.cpp
index 98bd3db..7ef662e 100644
--- a/lib/Target/R600/SIInsertWaits.cpp
+++ b/lib/Target/R600/SIInsertWaits.cpp
@@ -47,7 +47,7 @@ class SIInsertWaits : public MachineFunctionPass {
private:
static char ID;
const SIInstrInfo *TII;
- const SIRegisterInfo &TRI;
+ const SIRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
/// \brief Constant hardware limits
@@ -97,8 +97,9 @@ private:
public:
SIInsertWaits(TargetMachine &tm) :
MachineFunctionPass(ID),
- TII(static_cast<const SIInstrInfo*>(tm.getInstrInfo())),
- TRI(TII->getRegisterInfo()) { }
+ TII(0),
+ TRI(0),
+ ExpInstrTypesSeen(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -133,12 +134,19 @@ Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
// LGKM may use larger values
if (TSFlags & SIInstrFlags::LGKM_CNT) {
- MachineOperand &Op = MI.getOperand(0);
- assert(Op.isReg() && "First LGKM operand must be a register!");
+ if (TII->isSMRD(MI.getOpcode())) {
- unsigned Reg = Op.getReg();
- unsigned Size = TRI.getMinimalPhysRegClass(Reg)->getSize();
- Result.Named.LGKM = Size > 4 ? 2 : 1;
+ MachineOperand &Op = MI.getOperand(0);
+ assert(Op.isReg() && "First LGKM operand must be a register!");
+
+ unsigned Reg = Op.getReg();
+ unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
+ Result.Named.LGKM = Size > 4 ? 2 : 1;
+
+ } else {
+ // DS
+ Result.Named.LGKM = 1;
+ }
} else {
Result.Named.LGKM = 0;
@@ -178,16 +186,16 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
RegInterval SIInsertWaits::getRegInterval(MachineOperand &Op) {
- if (!Op.isReg())
+ if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
return std::make_pair(0, 0);
unsigned Reg = Op.getReg();
- unsigned Size = TRI.getMinimalPhysRegClass(Reg)->getSize();
+ unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
assert(Size >= 4);
RegInterval Result;
- Result.first = TRI.getEncodingValue(Reg);
+ Result.first = TRI->getEncodingValue(Reg);
Result.second = Result.first + Size / 4;
return Result;
@@ -328,9 +336,11 @@ Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
}
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
-
bool Changes = false;
+ TII = static_cast<const SIInstrInfo*>(MF.getTarget().getInstrInfo());
+ TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
MRI = &MF.getRegInfo();
WaitedOn = ZeroCounts;
diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/R600/SIInstrFormats.td
index f737ddd..53ebaaf 100644
--- a/lib/Target/R600/SIInstrFormats.td
+++ b/lib/Target/R600/SIInstrFormats.td
@@ -17,10 +17,24 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
field bits<1> VM_CNT = 0;
field bits<1> EXP_CNT = 0;
field bits<1> LGKM_CNT = 0;
+ field bits<1> MIMG = 0;
+ field bits<1> SMRD = 0;
+ field bits<1> VOP1 = 0;
+ field bits<1> VOP2 = 0;
+ field bits<1> VOP3 = 0;
+ field bits<1> VOPC = 0;
+ field bits<1> SALU = 0;
let TSFlags{0} = VM_CNT;
let TSFlags{1} = EXP_CNT;
let TSFlags{2} = LGKM_CNT;
+ let TSFlags{3} = MIMG;
+ let TSFlags{4} = SMRD;
+ let TSFlags{5} = VOP1;
+ let TSFlags{6} = VOP2;
+ let TSFlags{7} = VOP3;
+ let TSFlags{8} = VOPC;
+ let TSFlags{9} = SALU;
}
class Enc32 <dag outs, dag ins, string asm, list<dag> pattern> :
@@ -55,6 +69,7 @@ class SOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -73,6 +88,7 @@ class SOP2 <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -90,6 +106,7 @@ class SOPC <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -106,6 +123,7 @@ class SOPK <bits<5> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern> : Enc32 <
@@ -123,6 +141,7 @@ class SOPP <bits<7> op, dag ins, string asm, list<dag> pattern> : Enc32 <
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let SALU = 1;
}
class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
@@ -140,6 +159,7 @@ class SMRD <bits<5> op, bits<1> imm, dag outs, dag ins, string asm,
let Inst{31-27} = 0x18; //encoding
let LGKM_CNT = 1;
+ let SMRD = 1;
}
//===----------------------------------------------------------------------===//
@@ -162,6 +182,8 @@ class VOP1 <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP1 = 1;
}
class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -180,60 +202,66 @@ class VOP2 <bits<6> op, dag outs, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP2 = 1;
}
class VOP3 <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
Enc64 <outs, ins, asm, pattern> {
- bits<8> VDST;
- bits<9> SRC0;
- bits<9> SRC1;
- bits<9> SRC2;
- bits<3> ABS;
- bits<1> CLAMP;
- bits<2> OMOD;
- bits<3> NEG;
-
- let Inst{7-0} = VDST;
- let Inst{10-8} = ABS;
- let Inst{11} = CLAMP;
+ bits<8> dst;
+ bits<9> src0;
+ bits<9> src1;
+ bits<9> src2;
+ bits<3> abs;
+ bits<1> clamp;
+ bits<2> omod;
+ bits<3> neg;
+
+ let Inst{7-0} = dst;
+ let Inst{10-8} = abs;
+ let Inst{11} = clamp;
let Inst{25-17} = op;
let Inst{31-26} = 0x34; //encoding
- let Inst{40-32} = SRC0;
- let Inst{49-41} = SRC1;
- let Inst{58-50} = SRC2;
- let Inst{60-59} = OMOD;
- let Inst{63-61} = NEG;
+ let Inst{40-32} = src0;
+ let Inst{49-41} = src1;
+ let Inst{58-50} = src2;
+ let Inst{60-59} = omod;
+ let Inst{63-61} = neg;
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP3 = 1;
}
class VOP3b <bits<9> op, dag outs, dag ins, string asm, list<dag> pattern> :
Enc64 <outs, ins, asm, pattern> {
- bits<8> VDST;
- bits<9> SRC0;
- bits<9> SRC1;
- bits<9> SRC2;
- bits<7> SDST;
- bits<2> OMOD;
- bits<3> NEG;
+ bits<8> dst;
+ bits<9> src0;
+ bits<9> src1;
+ bits<9> src2;
+ bits<7> sdst;
+ bits<2> omod;
+ bits<3> neg;
- let Inst{7-0} = VDST;
- let Inst{14-8} = SDST;
+ let Inst{7-0} = dst;
+ let Inst{14-8} = sdst;
let Inst{25-17} = op;
let Inst{31-26} = 0x34; //encoding
- let Inst{40-32} = SRC0;
- let Inst{49-41} = SRC1;
- let Inst{58-50} = SRC2;
- let Inst{60-59} = OMOD;
- let Inst{63-61} = NEG;
+ let Inst{40-32} = src0;
+ let Inst{49-41} = src1;
+ let Inst{58-50} = src2;
+ let Inst{60-59} = omod;
+ let Inst{63-61} = neg;
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let UseNamedOperandTable = 1;
+ let VOP3 = 1;
}
class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
@@ -251,6 +279,7 @@ class VOPC <bits<8> op, dag ins, string asm, list<dag> pattern> :
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
+ let VOPC = 1;
}
class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
@@ -281,6 +310,30 @@ class VINTRP <bits <2> op, dag outs, dag ins, string asm, list<dag> pattern> :
let Uses = [EXEC] in {
+class DS <bits<8> op, dag outs, dag ins, string asm, list<dag> pattern> :
+ Enc64 <outs, ins, asm, pattern> {
+
+ bits<8> vdst;
+ bits<1> gds;
+ bits<8> addr;
+ bits<8> data0;
+ bits<8> data1;
+ bits<8> offset0;
+ bits<8> offset1;
+
+ let Inst{7-0} = offset0;
+ let Inst{15-8} = offset1;
+ let Inst{17} = gds;
+ let Inst{25-18} = op;
+ let Inst{31-26} = 0x36; //encoding
+ let Inst{39-32} = addr;
+ let Inst{47-40} = data0;
+ let Inst{55-48} = data1;
+ let Inst{63-56} = vdst;
+
+ let LGKM_CNT = 1;
+}
+
class MUBUF <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
Enc64<outs, ins, asm, pattern> {
@@ -390,6 +443,7 @@ class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let VM_CNT = 1;
let EXP_CNT = 1;
+ let MIMG = 1;
}
def EXP : Enc64<
diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index 9a04c60..ab55c1b 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -15,22 +15,26 @@
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
+#include "SIDefines.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"
-#include <stdio.h>
using namespace llvm;
SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
: AMDGPUInstrInfo(tm),
- RI(tm, *this)
+ RI(tm)
{ }
const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
return RI;
}
+//===----------------------------------------------------------------------===//
+// TargetInstrInfo callbacks
+//===----------------------------------------------------------------------===//
+
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
@@ -42,27 +46,27 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// never be necessary.
assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
- const int16_t Sub0_15[] = {
+ static const int16_t Sub0_15[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
};
- const int16_t Sub0_7[] = {
+ static const int16_t Sub0_7[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
};
- const int16_t Sub0_3[] = {
+ static const int16_t Sub0_3[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
};
- const int16_t Sub0_2[] = {
+ static const int16_t Sub0_2[] = {
AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
};
- const int16_t Sub0_1[] = {
+ static const int16_t Sub0_1[] = {
AMDGPU::sub0, AMDGPU::sub1, 0
};
@@ -118,14 +122,14 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
} else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
- AMDGPU::SReg_32RegClass.contains(SrcReg));
+ AMDGPU::SReg_32RegClass.contains(SrcReg));
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
} else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
- AMDGPU::SReg_64RegClass.contains(SrcReg));
+ AMDGPU::SReg_64RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_1;
@@ -136,19 +140,19 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
} else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
- AMDGPU::SReg_128RegClass.contains(SrcReg));
+ AMDGPU::SReg_128RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_3;
} else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
- AMDGPU::SReg_256RegClass.contains(SrcReg));
+ AMDGPU::SReg_256RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_7;
} else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
- AMDGPU::SReg_512RegClass.contains(SrcReg));
+ AMDGPU::SReg_512RegClass.contains(SrcReg));
Opcode = AMDGPU::V_MOV_B32_e32;
SubIndices = Sub0_15;
@@ -168,7 +172,6 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
-
int NewOpc;
// Try to map original to commuted opcode
@@ -185,11 +188,36 @@ unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
bool NewMI) const {
- if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
- !MI->getOperand(2).isReg())
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
return 0;
- MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
+ // Cannot commute VOP2 if src0 is SGPR.
+ if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
+ RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
+ return 0;
+
+ if (!MI->getOperand(2).isReg()) {
+ // XXX: Commute instructions with FPImm operands
+ if (NewMI || MI->getOperand(2).isFPImm() ||
+ (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
+ return 0;
+ }
+
+ // XXX: Commute VOP3 instructions with abs and neg set.
+ if (isVOP3(MI->getOpcode()) &&
+ (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::abs)).getImm() ||
+ MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::neg)).getImm()))
+ return 0;
+
+ unsigned Reg = MI->getOperand(1).getReg();
+ MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
+ MI->getOperand(2).ChangeToRegister(Reg, false);
+ } else {
+ MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
+ }
if (MI)
MI->setDesc(get(commuteOpcode(MI->getOpcode())));
@@ -197,15 +225,12 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
return MI;
}
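// Worked example of the new immediate path above (illustrative only;
// register names hypothetical): for a commutable VOP2 of the form
//   V_ADD_I32_e32 %dst, %src, 42
// operand 1 is changed to the immediate and operand 2 back to the
// register, yielding
//   V_ADD_I32_e32 %dst, 42, %src
// since only src0 of a VOP2 encoding may hold an immediate.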
-MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const {
- MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
- MachineInstrBuilder MIB(*MF, MI);
- MIB.addReg(DstReg, RegState::Define);
- MIB.addImm(Imm);
-
- return MI;
-
+MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg,
+ unsigned SrcReg) const {
+ return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
+ DstReg).addReg(SrcReg);
}
bool SIInstrInfo::isMov(unsigned Opcode) const {
@@ -224,32 +249,397 @@ SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
return RC != &AMDGPU::EXECRegRegClass;
}
-//===----------------------------------------------------------------------===//
-// Indirect addressing callbacks
-//===----------------------------------------------------------------------===//
+int SIInstrInfo::isMIMG(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::MIMG;
+}
-unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const {
- assert(Channel == 0);
- return RegIndex;
+int SIInstrInfo::isSMRD(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::SMRD;
+}
+
+bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP1;
+}
+
+bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP2;
+}
+
+bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOP3;
+}
+
+bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::VOPC;
+}
+
+bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
+ return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
+}
+
+bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
+ if (MO.isImm()) {
+ return MO.getImm() >= -16 && MO.getImm() <= 64;
+ }
+ if (MO.isFPImm()) {
+ return MO.getFPImm()->isExactlyValue(0.0) ||
+ MO.getFPImm()->isExactlyValue(0.5) ||
+ MO.getFPImm()->isExactlyValue(-0.5) ||
+ MO.getFPImm()->isExactlyValue(1.0) ||
+ MO.getFPImm()->isExactlyValue(-1.0) ||
+ MO.getFPImm()->isExactlyValue(2.0) ||
+ MO.getFPImm()->isExactlyValue(-2.0) ||
+ MO.getFPImm()->isExactlyValue(4.0) ||
+ MO.getFPImm()->isExactlyValue(-4.0);
+ }
+ return false;
+}
+
+bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
+ return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
+}
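// Illustrative use of the two predicates above (a sketch, not part of
// the patch; TII stands for a SIInstrInfo reference, MO for an operand):
//   if (TII.isInlineConstant(MO)) {
//     // Encoded directly in the instruction; not counted against the
//     // constant bus by the verifier below.
//   } else if (TII.isLiteralConstant(MO)) {
//     // Emitted as an extra 32-bit dword and counted against the
//     // constant bus.
//   }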
+
+bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
+ StringRef &ErrInfo) const {
+ uint16_t Opcode = MI->getOpcode();
+ int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
+ int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
+ int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
+
+ // Verify VOP*
+ if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
+ unsigned ConstantBusCount = 0;
+ unsigned SGPRUsed = AMDGPU::NoRegister;
+ for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isUse() &&
+ !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+
+ // EXEC register uses the constant bus.
+ if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
+ ++ConstantBusCount;
+
+ // SGPRs use the constant bus
+ if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
+ (!MO.isImplicit() &&
+ (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
+ AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
+ if (SGPRUsed != MO.getReg()) {
+ ++ConstantBusCount;
+ SGPRUsed = MO.getReg();
+ }
+ }
+ }
+ // Literal constants use the constant bus.
+ if (isLiteralConstant(MO))
+ ++ConstantBusCount;
+ }
+ if (ConstantBusCount > 1) {
+ ErrInfo = "VOP* instruction uses the constant bus more than once";
+ return false;
+ }
+ }
+
+ // Verify SRC1 for VOP2 and VOPC
+ if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
+ const MachineOperand &Src1 = MI->getOperand(Src1Idx);
+ if (Src1.isImm() || Src1.isFPImm()) {
+ ErrInfo = "VOP[2C] src1 cannot be an immediate.";
+ return false;
+ }
+ }
+
+ // Verify VOP3
+ if (isVOP3(Opcode)) {
+ if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
+ ErrInfo = "VOP3 src0 cannot be a literal constant.";
+ return false;
+ }
+ if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
+ ErrInfo = "VOP3 src1 cannot be a literal constant.";
+ return false;
+ }
+ if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
+ ErrInfo = "VOP3 src2 cannot be a literal constant.";
+ return false;
+ }
+ }
+ return true;
+}
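// A minimal sketch of the constant bus rule this verifier enforces
// (assumption: TII and MI are set up elsewhere); at most one scalar
// source may feed a VOP encoding:
//   V_ADD_F32_e32 v0, s0, v1   // one SGPR read      -> accepted
//   V_ADD_F32_e32 v0, s0, s1   // two distinct SGPRs -> rejected
//   StringRef Err;
//   if (!TII->verifyInstruction(MI, Err))
//     errs() << "bad VOP: " << Err << '\n';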
+
+unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ default: return AMDGPU::INSTRUCTION_LIST_END;
+ case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
+ case AMDGPU::COPY: return AMDGPU::COPY;
+ case AMDGPU::PHI: return AMDGPU::PHI;
+ case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
+ case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
+ case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
+ case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
+ case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
+ case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
+ case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
+ case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
+ case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
+ case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
+ }
+}
+
+bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
+ return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
+}
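// Sketch of the intended driver (an assumption about how a later pass
// would use these hooks, not code from this patch):
//   if (TII->isSALUInstr(*MI) && TII->isSALUOpSupportedOnVALU(*MI))
//     TII->moveToVALU(*MI); // rewrite MI and, transitively, its users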
+
+const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
+ unsigned OpNo) const {
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ const MCInstrDesc &Desc = get(MI.getOpcode());
+ if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
+ Desc.OpInfo[OpNo].RegClass == -1)
+ return MRI.getRegClass(MI.getOperand(OpNo).getReg());
+
+ unsigned RCID = Desc.OpInfo[OpNo].RegClass;
+ return RI.getRegClass(RCID);
+}
+
+bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
+ switch (MI.getOpcode()) {
+ case AMDGPU::COPY:
+ case AMDGPU::REG_SEQUENCE:
+ return RI.hasVGPRs(getOpRegClass(MI, 0));
+ default:
+ return RI.hasVGPRs(getOpRegClass(MI, OpNo));
+ }
}
+void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
+ MachineBasicBlock::iterator I = MI;
+ MachineOperand &MO = MI->getOperand(OpIdx);
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
+ const TargetRegisterClass *RC = RI.getRegClass(RCID);
+ unsigned Opcode = AMDGPU::V_MOV_B32_e32;
+ if (MO.isReg()) {
+ Opcode = AMDGPU::COPY;
+ } else if (RI.isSGPRClass(RC)) {
+ Opcode = AMDGPU::S_MOV_B32;
+ }
+
+ const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
+ unsigned Reg = MRI.createVirtualRegister(VRC);
+ BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
+ Reg).addOperand(MO);
+ MO.ChangeToRegister(Reg, false);
+}
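// Effect of legalizeOpWithMove on an illegal scalar operand
// (illustrative MIR, virtual register numbers hypothetical):
//   %2 = V_ADD_I32_e32 %0, %1:sgpr_32
// becomes
//   %3:vgpr_32 = COPY %1
//   %2 = V_ADD_I32_e32 %0, %3
// An immediate operand would instead be materialized with
// V_MOV_B32_e32 (or S_MOV_B32 when the slot wants an SGPR).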
+
+void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
+ MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::src0);
+ int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::src1);
+ int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::src2);
+
+ // Legalize VOP2
+ if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
+ MachineOperand &Src0 = MI->getOperand(Src0Idx);
+ MachineOperand &Src1 = MI->getOperand(Src1Idx);
+
+ // If the instruction implicitly reads VCC, it can't have any SGPR
+ // operands, so move any SGPR operands into VGPRs with a copy.
+ bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
+ if (ReadsVCC && Src0.isReg() &&
+ RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
+ legalizeOpWithMove(MI, Src0Idx);
+ return;
+ }
+
+ if (ReadsVCC && Src1.isReg() &&
+ RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
+ legalizeOpWithMove(MI, Src1Idx);
+ return;
+ }
+
+ // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
+ // be the first operand, and there can only be one.
+ if (Src1.isImm() || Src1.isFPImm() ||
+ (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
+ if (MI->isCommutable()) {
+ if (commuteInstruction(MI))
+ return;
+ }
+ legalizeOpWithMove(MI, Src1Idx);
+ }
+ }
+
+ // XXX - Do any VOP3 instructions read VCC?
+ // Legalize VOP3
+ if (isVOP3(MI->getOpcode())) {
+ int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
+ unsigned SGPRReg = AMDGPU::NoRegister;
+ for (unsigned i = 0; i < 3; ++i) {
+ int Idx = VOP3Idx[i];
+ if (Idx == -1)
+ continue;
+ MachineOperand &MO = MI->getOperand(Idx);
+
+ if (MO.isReg()) {
+ if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
+ continue; // VGPRs are legal
+
+ assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");
+
+ if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
+ SGPRReg = MO.getReg();
+ // We can use one SGPR in each VOP3 instruction.
+ continue;
+ }
+ } else if (!isLiteralConstant(MO)) {
+ // If it is not a register and not a literal constant, then it must be
+ // an inline constant, which is always legal.
+ continue;
+ }
+ // If we make it this far, then the operand is not legal and we must
+ // legalize it.
+ legalizeOpWithMove(MI, Idx);
+ }
+ }
+
+ // Legalize REG_SEQUENCE
+ // The register class of the operands must be the same type as the register
+ // class of the output.
+ if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
+ const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
+ for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
+ if (!MI->getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
+ continue;
+ const TargetRegisterClass *OpRC =
+ MRI.getRegClass(MI->getOperand(i).getReg());
+ if (RI.hasVGPRs(OpRC)) {
+ VRC = OpRC;
+ } else {
+ SRC = OpRC;
+ }
+ }
+
+ // If any of the operands are VGPR registers, then they all must be
+ // VGPRs, otherwise we will create illegal VGPR->SGPR copies when
+ // legalizing them.
+ if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
+ if (!VRC) {
+ assert(SRC);
+ VRC = RI.getEquivalentVGPRClass(SRC);
+ }
+ RC = VRC;
+ } else {
+ RC = SRC;
+ }
-int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
- llvm_unreachable("Unimplemented");
+ // Update all the operands so they have the same type.
+ for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
+ if (!MI->getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
+ continue;
+ unsigned DstReg = MRI.createVirtualRegister(RC);
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+ get(AMDGPU::COPY), DstReg)
+ .addOperand(MI->getOperand(i));
+ MI->getOperand(i).setReg(DstReg);
+ }
+ }
}
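// REG_SEQUENCE legalization, illustrated (hypothetical MIR): an input
// that mixes register banks, e.g.
//   %v = REG_SEQUENCE %s0:sgpr_32, sub0, %v1:vgpr_32, sub1
// has every register operand copied into the common (here vector) class:
//   %t0:vgpr_32 = COPY %s0
//   %t1:vgpr_32 = COPY %v1
//   %v = REG_SEQUENCE %t0, sub0, %t1, sub1
// so no illegal VGPR->SGPR copy has to be synthesized later.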
-int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
- llvm_unreachable("Unimplemented");
+void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
+ SmallVector<MachineInstr *, 128> Worklist;
+ Worklist.push_back(&TopInst);
+
+ while (!Worklist.empty()) {
+ MachineInstr *Inst = Worklist.pop_back_val();
+ unsigned NewOpcode = getVALUOp(*Inst);
+ if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
+ continue;
+
+ MachineRegisterInfo &MRI = Inst->getParent()->getParent()->getRegInfo();
+
+ // Use the new VALU Opcode.
+ const MCInstrDesc &NewDesc = get(NewOpcode);
+ Inst->setDesc(NewDesc);
+
+ // Remove any references to SCC. Vector instructions can't read from it, and
+ // we're just about to add the implicit uses / defs of VCC, and we don't want
+ // both.
+ for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
+ MachineOperand &Op = Inst->getOperand(i);
+ if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
+ Inst->RemoveOperand(i);
+ }
+
+ // Add the implicit register uses and definitions.
+ if (NewDesc.ImplicitUses) {
+ for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
+ unsigned Reg = NewDesc.ImplicitUses[i];
+ Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
+ }
+ }
+
+ if (NewDesc.ImplicitDefs) {
+ for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
+ unsigned Reg = NewDesc.ImplicitDefs[i];
+ Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
+ }
+ }
+
+ legalizeOperands(Inst);
+
+ // Update the destination register class.
+ const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);
+
+ switch (Inst->getOpcode()) {
+ // For target instructions, getOpRegClass just returns the virtual
+ // register class associated with the operand, so we need to find an
+ // equivalent VGPR register class in order to move the instruction to the
+ // VALU.
+ case AMDGPU::COPY:
+ case AMDGPU::PHI:
+ case AMDGPU::REG_SEQUENCE:
+ if (RI.hasVGPRs(NewDstRC))
+ continue;
+ NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
+ if (!NewDstRC)
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ unsigned DstReg = Inst->getOperand(0).getReg();
+ unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
+ MRI.replaceRegWith(DstReg, NewDstReg);
+
+ for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
+ E = MRI.use_end(); I != E; ++I) {
+ MachineInstr &UseMI = *I;
+ if (!canReadVGPR(UseMI, I.getOperandNo())) {
+ Worklist.push_back(&UseMI);
+ }
+ }
+ }
}
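// The loop above is a standard worklist fixed point: after each rewrite,
// every user that cannot accept a VGPR input is queued and rewritten in
// turn. Reduced to its core (a sketch, with hypothetical helpers
// rewrite/usersOf/opNoOf):
//   while (!Worklist.empty()) {
//     MachineInstr *I = Worklist.pop_back_val();
//     rewrite(I);                       // SALU -> VALU opcode + operands
//     for (MachineInstr &U : usersOf(I))
//       if (!canReadVGPR(U, opNoOf(U))) // user must follow to the VALU
//         Worklist.push_back(&U);
//   }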
-const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
- unsigned SourceReg) const {
- llvm_unreachable("Unimplemented");
+//===----------------------------------------------------------------------===//
+// Indirect addressing callbacks
+//===----------------------------------------------------------------------===//
+
+unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
+ unsigned Channel) const {
+ assert(Channel == 0);
+ return RegIndex;
}
-const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
- llvm_unreachable("Unimplemented");
+const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
+ return &AMDGPU::VReg_32RegClass;
}
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
@@ -257,7 +647,17 @@ MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
MachineBasicBlock::iterator I,
unsigned ValueReg,
unsigned Address, unsigned OffsetReg) const {
- llvm_unreachable("Unimplemented");
+ const DebugLoc &DL = MBB->findDebugLoc(I);
+ unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
+ getIndirectIndexBegin(*MBB->getParent()));
+
+ return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
+ .addReg(IndirectBaseReg, RegState::Define)
+ .addOperand(I->getOperand(0))
+ .addReg(IndirectBaseReg)
+ .addReg(OffsetReg)
+ .addImm(0)
+ .addReg(ValueReg);
}
MachineInstrBuilder SIInstrInfo::buildIndirectRead(
@@ -265,9 +665,43 @@ MachineInstrBuilder SIInstrInfo::buildIndirectRead(
MachineBasicBlock::iterator I,
unsigned ValueReg,
unsigned Address, unsigned OffsetReg) const {
- llvm_unreachable("Unimplemented");
+ const DebugLoc &DL = MBB->findDebugLoc(I);
+ unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
+ getIndirectIndexBegin(*MBB->getParent()));
+
+ return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
+ .addOperand(I->getOperand(0))
+ .addOperand(I->getOperand(1))
+ .addReg(IndirectBaseReg)
+ .addReg(OffsetReg)
+ .addImm(0);
}
-const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
- llvm_unreachable("Unimplemented");
+void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
+ const MachineFunction &MF) const {
+ int End = getIndirectIndexEnd(MF);
+ int Begin = getIndirectIndexBegin(MF);
+
+ if (End == -1)
+ return;
+
+ for (int Index = Begin; Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
+
+ for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
+ Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}
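// Why the staggered lower bounds above (illustrative arithmetic, not
// from the patch): an N-dword register tuple starting at 32-bit index I
// covers [I, I + N - 1], so the lowest tuple that can still overlap the
// reserved range [Begin, End] starts at Begin - (N - 1). With N = 2, 3,
// 4, 8, 16 that gives exactly the -1, -2, -3, -7 and -15 offsets used
// for VReg_64 through VReg_512:
//   int FirstOverlapping = std::max(0, Begin - (N - 1));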
diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/R600/SIInstrInfo.h
index 87eff4d..4af6348 100644
--- a/lib/Target/R600/SIInstrInfo.h
+++ b/lib/Target/R600/SIInstrInfo.h
@@ -1,4 +1,4 @@
-//===-- SIInstrInfo.h - SI Instruction Info Interface ---------------------===//
+//===-- SIInstrInfo.h - SI Instruction Info Interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,14 @@ class SIInstrInfo : public AMDGPUInstrInfo {
private:
const SIRegisterInfo RI;
+ MachineInstrBuilder buildIndirectIndexLoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned OffsetVGPR,
+ unsigned MovRelOp,
+ unsigned Dst,
+ unsigned Src0) const;
+ // If you add or remove instructions from this function, you will
+
public:
explicit SIInstrInfo(AMDGPUTargetMachine &tm);
@@ -40,25 +48,65 @@ public:
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI=false) const;
- virtual MachineInstr * getMovImmInstr(MachineFunction *MF, unsigned DstReg,
- int64_t Imm) const;
-
virtual unsigned getIEQOpcode() const { assert(!"Implement"); return 0;}
+ MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DstReg, unsigned SrcReg) const;
virtual bool isMov(unsigned Opcode) const;
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
-
- virtual int getIndirectIndexBegin(const MachineFunction &MF) const;
-
- virtual int getIndirectIndexEnd(const MachineFunction &MF) const;
+ int isMIMG(uint16_t Opcode) const;
+ int isSMRD(uint16_t Opcode) const;
+ bool isVOP1(uint16_t Opcode) const;
+ bool isVOP2(uint16_t Opcode) const;
+ bool isVOP3(uint16_t Opcode) const;
+ bool isVOPC(uint16_t Opcode) const;
+ bool isInlineConstant(const MachineOperand &MO) const;
+ bool isLiteralConstant(const MachineOperand &MO) const;
+
+ virtual bool verifyInstruction(const MachineInstr *MI,
+ StringRef &ErrInfo) const;
+
+ bool isSALUInstr(const MachineInstr &MI) const;
+ static unsigned getVALUOp(const MachineInstr &MI);
+ bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const;
+
+ /// \brief Return the correct register class for \p OpNo. For target-specific
+ /// instructions, this will return the register class that has been defined
+ /// in tablegen. For generic instructions, like REG_SEQUENCE, it will return
+ /// the register class of its machine operand, which callers can then use
+ /// to infer the correct register class based on the other operands.
+ const TargetRegisterClass *getOpRegClass(const MachineInstr &MI,
+ unsigned OpNo) const;
+
+ /// \returns true if it is legal for the operand at index \p OpNo
+ /// to read a VGPR.
+ bool canReadVGPR(const MachineInstr &MI, unsigned OpNo) const;
+
+ /// \brief Legalize the \p OpIndex operand of this instruction by inserting
+ /// a MOV. For example:
+ /// ADD_I32_e32 VGPR0, 15
+ /// to
+ /// MOV VGPR1, 15
+ /// ADD_I32_e32 VGPR0, VGPR1
+ ///
+ /// If the operand being legalized is a register, then a COPY will be used
+ /// instead of MOV.
+ void legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const;
+
+ /// \brief Legalize all operands in this instruction. This function may
+ /// create new instructions and insert them before \p MI.
+ void legalizeOperands(MachineInstr *MI) const;
+
+ /// \brief Replace this instruction's opcode with the equivalent VALU
+ /// opcode. This function will also move the users of \p MI to the
+ /// VALU if necessary.
+ void moveToVALU(MachineInstr &MI) const;
virtual unsigned calculateIndirectAddress(unsigned RegIndex,
unsigned Channel) const;
- virtual const TargetRegisterClass *getIndirectAddrStoreRegClass(
- unsigned SourceReg) const;
-
- virtual const TargetRegisterClass *getIndirectAddrLoadRegClass() const;
+ virtual const TargetRegisterClass *getIndirectAddrRegClass() const;
virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
@@ -71,16 +119,18 @@ public:
unsigned ValueReg,
unsigned Address,
unsigned OffsetReg) const;
+ void reserveIndirectRegisters(BitVector &Reserved,
+ const MachineFunction &MF) const;
- virtual const TargetRegisterClass *getSuperIndirectRegClass() const;
- };
+ void LoadM0(MachineInstr *MoveRel, MachineBasicBlock::iterator I,
+ unsigned SavReg, unsigned IndexReg) const;
+};
namespace AMDGPU {
int getVOPe64(uint16_t Opcode);
int getCommuteRev(uint16_t Opcode);
int getCommuteOrig(uint16_t Opcode);
- int isMIMG(uint16_t Opcode);
} // End namespace AMDGPU
diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
index c8aecb7..4cd0daa 100644
--- a/lib/Target/R600/SIInstrInfo.td
+++ b/lib/Target/R600/SIInstrInfo.td
@@ -16,19 +16,64 @@ def SIadd64bit32bit : SDNode<"ISD::ADD",
SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisVT<0, i64>, SDTCisVT<2, i32>]>
>;
+def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
+ SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, i128>, SDTCisVT<2, i32>]>,
+ [SDNPMayLoad, SDNPMemOperand]
+>;
+
+def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
+ SDTypeProfile<0, 13,
+ [SDTCisVT<0, i128>, // rsrc(SGPR)
+ SDTCisVT<1, iAny>, // vdata(VGPR)
+ SDTCisVT<2, i32>, // num_channels(imm)
+ SDTCisVT<3, i32>, // vaddr(VGPR)
+ SDTCisVT<4, i32>, // soffset(SGPR)
+ SDTCisVT<5, i32>, // inst_offset(imm)
+ SDTCisVT<6, i32>, // dfmt(imm)
+ SDTCisVT<7, i32>, // nfmt(imm)
+ SDTCisVT<8, i32>, // offen(imm)
+ SDTCisVT<9, i32>, // idxen(imm)
+ SDTCisVT<10, i32>, // glc(imm)
+ SDTCisVT<11, i32>, // slc(imm)
+ SDTCisVT<12, i32> // tfe(imm)
+ ]>,
+ [SDNPMayStore, SDNPMemOperand, SDNPHasChain]
+>;
+
+def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, i128>, SDTCisVT<2, i16>,
+ SDTCisVT<3, i32>]>
+>;
+
+class SDSample<string opcode> : SDNode <opcode,
+ SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v32i8>,
+ SDTCisVT<3, i128>, SDTCisVT<4, i32>]>
+>;
+
+def SIsample : SDSample<"AMDGPUISD::SAMPLE">;
+def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
+def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
+def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;
+
// Transformation function, extract the lower 32bit of a 64bit immediate
def LO32 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, MVT::i32);
}]>;
+def LO32f : SDNodeXForm<fpimm, [{
+ APInt V = N->getValueAPF().bitcastToAPInt().trunc(32);
+ return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+}]>;
+
// Transformation function, extract the upper 32bit of a 64bit immediate
def HI32 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32);
}]>;
-def SIbuffer_store : SDNode<"AMDGPUISD::BUFFER_STORE",
- SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
- [SDNPHasChain, SDNPMayStore]>;
+def HI32f : SDNodeXForm<fpimm, [{
+ APInt V = N->getValueAPF().bitcastToAPInt().lshr(32).trunc(32);
+ return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+}]>;
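// Worked example (illustrative): for the 64-bit immediate
// 0x1122334455667788, LO32 produces 0x55667788 and HI32 produces
// 0x11223344; LO32f and HI32f perform the same split on the bit
// pattern of a 64-bit floating-point immediate.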
def IMM8bitDWORD : ImmLeaf <
i32, [{
@@ -39,15 +84,47 @@ def IMM8bitDWORD : ImmLeaf <
}]>
>;
-def IMM12bit : ImmLeaf <
- i16,
- [{return isUInt<12>(Imm);}]
+def as_i1imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i1);
+}]>;
+
+def as_i8imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i8);
+}]>;
+
+def as_i16imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i16);
+}]>;
+
+def IMM12bit : PatLeaf <(imm),
+ [{return isUInt<12>(N->getZExtValue());}]
>;
class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
- return ((const SITargetLowering &)TLI).analyzeImmediate(N) == 0;
+ return
+ (*(const SITargetLowering *)getTargetLowering()).analyzeImmediate(N) == 0;
+}]>;
+
+class SGPRImm <dag frag> : PatLeaf<frag, [{
+ if (TM.getSubtarget<AMDGPUSubtarget>().getGeneration() <
+ AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ return false;
+ }
+ const SIRegisterInfo *SIRI =
+ static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
+ for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
+ U != E; ++U) {
+ if (SIRI->isSGPRClass(getOperandRegClass(*U, U.getOperandNo()))) {
+ return true;
+ }
+ }
+ return false;
}]>;
+def FRAMEri64 : Operand<iPTR> {
+ let MIOperandInfo = (ops SReg_32:$ptr, i32imm:$index);
+}
+
//===----------------------------------------------------------------------===//
// SI assembler operands
//===----------------------------------------------------------------------===//
@@ -99,6 +176,11 @@ class SOP2_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
opName#" $dst, $src0, $src1", pattern
>;
+class SOP2_SHIFT_64 <bits<7> op, string opName, list<dag> pattern> : SOP2 <
+ op, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
+ opName#" $dst, $src0, $src1", pattern
+>;
+
class SOPC_32 <bits<7> op, string opName, list<dag> pattern> : SOPC <
op, (outs SCCReg:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
opName#" $dst, $src0, $src1", pattern
@@ -163,8 +245,8 @@ multiclass VOP1_Helper <bits<8> op, RegisterClass drc, RegisterClass src,
i32imm:$omod, i32imm:$neg),
opName#"_e64 $dst, $src0, $abs, $clamp, $omod, $neg", []
>, VOP <opName> {
- let SRC1 = SIOperand.ZERO;
- let SRC2 = SIOperand.ZERO;
+ let src1 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
}
}
@@ -174,6 +256,12 @@ multiclass VOP1_32 <bits<8> op, string opName, list<dag> pattern>
multiclass VOP1_64 <bits<8> op, string opName, list<dag> pattern>
: VOP1_Helper <op, VReg_64, VSrc_64, opName, pattern>;
+multiclass VOP1_32_64 <bits<8> op, string opName, list<dag> pattern>
+ : VOP1_Helper <op, VReg_32, VSrc_64, opName, pattern>;
+
+multiclass VOP1_64_32 <bits<8> op, string opName, list<dag> pattern>
+ : VOP1_Helper <op, VReg_64, VSrc_32, opName, pattern>;
+
multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
string opName, list<dag> pattern, string revOp> {
def _e32 : VOP2 <
@@ -189,7 +277,7 @@ multiclass VOP2_Helper <bits<6> op, RegisterClass vrc, RegisterClass arc,
i32imm:$omod, i32imm:$neg),
opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
>, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
- let SRC2 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
}
}
@@ -217,11 +305,11 @@ multiclass VOP2b_32 <bits<6> op, string opName, list<dag> pattern,
i32imm:$omod, i32imm:$neg),
opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", []
>, VOP <opName>, VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
- let SRC2 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
/* the VOP2 variant puts the carry out into VCC, the VOP3 variant
can write it into any SGPR. We currently don't use the carry out,
so for now hardcode it to VCC as well */
- let SDST = SIOperand.VCC;
+ let sdst = SIOperand.VCC;
}
}
@@ -244,7 +332,7 @@ multiclass VOPC_Helper <bits<8> op, RegisterClass vrc, RegisterClass arc,
[(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), arc:$src1, cond)))]
)
>, VOP <opName> {
- let SRC2 = SIOperand.ZERO;
+ let src2 = SIOperand.ZERO;
}
}
@@ -263,6 +351,19 @@ class VOP3_32 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern
>, VOP <opName>;
+class VOP3_64_Shift <bits <9> op, string opName, list<dag> pattern> : VOP3 <
+ op, (outs VReg_64:$dst),
+ (ins VSrc_64:$src0, VSrc_32:$src1),
+ opName#" $dst, $src0, $src1", pattern
+>, VOP <opName> {
+
+ let src2 = SIOperand.ZERO;
+ let abs = 0;
+ let clamp = 0;
+ let omod = 0;
+ let neg = 0;
+}
+
class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
op, (outs VReg_64:$dst),
(ins VSrc_64:$src0, VSrc_64:$src1, VSrc_64:$src2,
@@ -274,6 +375,41 @@ class VOP3_64 <bits<9> op, string opName, list<dag> pattern> : VOP3 <
// Vector I/O classes
//===----------------------------------------------------------------------===//
+class DS_Load_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+ op,
+ (outs regClass:$vdst),
+ (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1,
+ i8imm:$offset0, i8imm:$offset1),
+ asm#" $vdst, $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]",
+ []> {
+ let mayLoad = 1;
+ let mayStore = 0;
+}
+
+class DS_Store_Helper <bits<8> op, string asm, RegisterClass regClass> : DS <
+ op,
+ (outs),
+ (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, VReg_32:$data1,
+ i8imm:$offset0, i8imm:$offset1),
+ asm#" $gds, $addr, $data0, $data1, $offset0, $offset1, [M0]",
+ []> {
+ let mayStore = 1;
+ let mayLoad = 0;
+ let vdst = 0;
+}
+
+class DS_1A1D_RET <bits<8> op, string asm, RegisterClass rc> : DS <
+ op,
+ (outs rc:$vdst),
+ (ins i1imm:$gds, VReg_32:$addr, VReg_32:$data0, i8imm:$offset0,
+ i8imm:$offset1),
+ asm#" $gds, $vdst, $addr, $data0, $offset0, $offset1, [M0]",
+ []> {
+ let mayStore = 1;
+ let mayLoad = 1;
+ let data1 = 0;
+}
+
class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF <
op,
(outs),
@@ -287,31 +423,41 @@ class MTBUF_Store_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBU
let mayLoad = 0;
}
-class MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> : MUBUF <
- op,
- (outs regClass:$vdata),
- (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64,
- i1imm:$lds, VReg_32:$vaddr, SReg_128:$srsrc, i1imm:$slc,
- i1imm:$tfe, SSrc_32:$soffset),
- asm#" $vdata, $offset, $offen, $idxen, $glc, $addr64, "
- #"$lds, $vaddr, $srsrc, $slc, $tfe, $soffset",
- []> {
- let mayLoad = 1;
- let mayStore = 0;
+multiclass MUBUF_Load_Helper <bits<7> op, string asm, RegisterClass regClass> {
+
+ let glc = 0, lds = 0, slc = 0, tfe = 0, soffset = 128 /* ZERO */,
+ mayLoad = 1 in {
+
+ let offen = 1, idxen = 0, addr64 = 0, offset = 0 in {
+ def _OFFEN : MUBUF <op, (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_32:$vaddr),
+ asm#" $vdata, $srsrc + $vaddr", []>;
+ }
+
+ let offen = 0, idxen = 1, addr64 = 0 in {
+ def _IDXEN : MUBUF <op, (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_32:$vaddr, i16imm:$offset),
+ asm#" $vdata, $srsrc[$vaddr] + $offset", []>;
+ }
+
+ let offen = 0, idxen = 0, addr64 = 1 in {
+ def _ADDR64 : MUBUF <op, (outs regClass:$vdata),
+ (ins SReg_128:$srsrc, VReg_64:$vaddr, i16imm:$offset),
+ asm#" $vdata, $srsrc + $vaddr + $offset", []>;
+ }
+ }
}
-class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass,
- ValueType VT> :
- MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr),
- name#" $vdata, $srsrc + $vaddr",
- [(SIbuffer_store (VT vdataClass:$vdata), (i128 SReg_128:$srsrc),
- (i64 VReg_64:$vaddr))]> {
+class MUBUF_Store_Helper <bits<7> op, string name, RegisterClass vdataClass> :
+ MUBUF <op, (outs), (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr,
+ i16imm:$offset),
+ name#" $vdata, $srsrc + $vaddr + $offset",
+ []> {
let mayLoad = 0;
let mayStore = 1;
// Encoding
- let offset = 0;
let offen = 0;
let idxen = 0;
let glc = 0;
@@ -335,11 +481,18 @@ class MTBUF_Load_Helper <bits<3> op, string asm, RegisterClass regClass> : MTBUF
let mayStore = 0;
}
-class MIMG_NoSampler_Helper <bits<7> op, string asm> : MIMG <
+class MIMG_Mask <string op, int channels> {
+ string Op = op;
+ int Channels = channels;
+}
+
+class MIMG_NoSampler_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ RegisterClass src_rc> : MIMG <
op,
- (outs VReg_128:$vdata),
+ (outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, unknown:$vaddr,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
SReg_256:$srsrc),
asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
#" $tfe, $lwe, $slc, $vaddr, $srsrc",
@@ -350,11 +503,31 @@ class MIMG_NoSampler_Helper <bits<7> op, string asm> : MIMG <
let hasPostISelHook = 1;
}
-class MIMG_Sampler_Helper <bits<7> op, string asm> : MIMG <
+multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ int channels> {
+ def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+}
+
+multiclass MIMG_NoSampler <bits<7> op, string asm> {
+ defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VReg_32, 1>;
+ defm _V2 : MIMG_NoSampler_Src_Helper <op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_NoSampler_Src_Helper <op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 4>;
+}
+
+class MIMG_Sampler_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ RegisterClass src_rc> : MIMG <
op,
- (outs VReg_128:$vdata),
+ (outs dst_rc:$vdata),
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, unknown:$vaddr,
+ i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
SReg_256:$srsrc, SReg_128:$ssamp),
asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
#" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
@@ -364,6 +537,28 @@ class MIMG_Sampler_Helper <bits<7> op, string asm> : MIMG <
let hasPostISelHook = 1;
}
+multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
+ RegisterClass dst_rc,
+ int channels> {
+ def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+ def _V8 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_256>,
+ MIMG_Mask<asm#"_V8", channels>;
+ def _V16 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_512>,
+ MIMG_Mask<asm#"_V16", channels>;
+}
+
+multiclass MIMG_Sampler <bits<7> op, string asm> {
+ defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VReg_32, 1>;
+ defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4>;
+}
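// The nested multiclasses expand to a dst x vaddr cross product; for
// instance IMAGE_SAMPLE_V4_V8 (a name inferred from the defm/def
// suffixes above, formed as asm#"_V4"#"_V8") returns four channels in a
// VReg_128 while taking an eight-dword address vector in a VReg_256.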
+
//===----------------------------------------------------------------------===//
// Vector instruction mappings
//===----------------------------------------------------------------------===//
@@ -386,6 +581,14 @@ def getCommuteRev : InstrMapping {
let ValueCols = [["0"]];
}
+def getMaskedMIMGOp : InstrMapping {
+ let FilterClass = "MIMG_Mask";
+ let RowFields = ["Op"];
+ let ColFields = ["Channels"];
+ let KeyCol = ["4"];
+ let ValueCols = [["1"], ["2"], ["3"]];
+}
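// Sketch of the C++ side (an assumption about the TableGen-generated
// mapping API): a post-isel hook can shrink a 4-channel image operation
// to the number of channels its dmask actually uses, e.g.
//   int NewOpc = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), Channels);
//   if (NewOpc != -1)
//     MI.setDesc(TII->get(NewOpc));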
+
// Maps an commuted opcode to its original version
def getCommuteOrig : InstrMapping {
let FilterClass = "VOP2_REV";
@@ -395,13 +598,4 @@ def getCommuteOrig : InstrMapping {
let ValueCols = [["1"]];
}
-// Test if the supplied opcode is an MIMG instruction
-def isMIMG : InstrMapping {
- let FilterClass = "MIMG";
- let RowFields = ["Inst"];
- let ColFields = ["Size"];
- let KeyCol = ["8"];
- let ValueCols = [["8"]];
-}
-
include "SIInstructions.td"
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 0d50c5d..76f05eb 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -22,8 +22,10 @@ def InterpSlot : Operand<i32> {
let PrintMethod = "printInterpSlot";
}
-def isSI : Predicate<"Subtarget.device()"
- "->getGeneration() == AMDGPUDeviceInfo::HD7XXX">;
+def isSI : Predicate<"Subtarget.getGeneration() "
+ ">= AMDGPUSubtarget::SOUTHERN_ISLANDS">;
+
+def WAIT_FLAG : InstFlag<"printWaitFlag">;
let Predicates = [isSI] in {
@@ -126,8 +128,11 @@ def S_CMPK_LT_U32 : SOPK_32 <0x0000000d, "S_CMPK_LT_U32", []>;
def S_CMPK_LE_U32 : SOPK_32 <0x0000000e, "S_CMPK_LE_U32", []>;
} // End isCompare = 1
-def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
-def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>;
+let Defs = [SCC], isCommutable = 1 in {
+ def S_ADDK_I32 : SOPK_32 <0x0000000f, "S_ADDK_I32", []>;
+ def S_MULK_I32 : SOPK_32 <0x00000010, "S_MULK_I32", []>;
+}
+
//def S_CBRANCH_I_FORK : SOPK_ <0x00000011, "S_CBRANCH_I_FORK", []>;
def S_GETREG_B32 : SOPK_32 <0x00000012, "S_GETREG_B32", []>;
def S_SETREG_B32 : SOPK_32 <0x00000013, "S_SETREG_B32", []>;
@@ -138,19 +143,19 @@ def S_GETREG_REGRD_B32 : SOPK_32 <0x00000014, "S_GETREG_REGRD_B32", []>;
let isCompare = 1 in {
defm V_CMP_F_F32 : VOPC_32 <0x00000000, "V_CMP_F_F32">;
-defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", f32, COND_LT>;
-defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", f32, COND_EQ>;
-defm V_CMP_LE_F32 : VOPC_32 <0x00000003, "V_CMP_LE_F32", f32, COND_LE>;
-defm V_CMP_GT_F32 : VOPC_32 <0x00000004, "V_CMP_GT_F32", f32, COND_GT>;
-defm V_CMP_LG_F32 : VOPC_32 <0x00000005, "V_CMP_LG_F32", f32, COND_NE>;
-defm V_CMP_GE_F32 : VOPC_32 <0x00000006, "V_CMP_GE_F32", f32, COND_GE>;
-defm V_CMP_O_F32 : VOPC_32 <0x00000007, "V_CMP_O_F32">;
-defm V_CMP_U_F32 : VOPC_32 <0x00000008, "V_CMP_U_F32">;
+defm V_CMP_LT_F32 : VOPC_32 <0x00000001, "V_CMP_LT_F32", f32, COND_OLT>;
+defm V_CMP_EQ_F32 : VOPC_32 <0x00000002, "V_CMP_EQ_F32", f32, COND_OEQ>;
+defm V_CMP_LE_F32 : VOPC_32 <0x00000003, "V_CMP_LE_F32", f32, COND_OLE>;
+defm V_CMP_GT_F32 : VOPC_32 <0x00000004, "V_CMP_GT_F32", f32, COND_OGT>;
+defm V_CMP_LG_F32 : VOPC_32 <0x00000005, "V_CMP_LG_F32">;
+defm V_CMP_GE_F32 : VOPC_32 <0x00000006, "V_CMP_GE_F32", f32, COND_OGE>;
+defm V_CMP_O_F32 : VOPC_32 <0x00000007, "V_CMP_O_F32", f32, COND_O>;
+defm V_CMP_U_F32 : VOPC_32 <0x00000008, "V_CMP_U_F32", f32, COND_UO>;
defm V_CMP_NGE_F32 : VOPC_32 <0x00000009, "V_CMP_NGE_F32">;
defm V_CMP_NLG_F32 : VOPC_32 <0x0000000a, "V_CMP_NLG_F32">;
defm V_CMP_NGT_F32 : VOPC_32 <0x0000000b, "V_CMP_NGT_F32">;
defm V_CMP_NLE_F32 : VOPC_32 <0x0000000c, "V_CMP_NLE_F32">;
-defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_NE>;
+defm V_CMP_NEQ_F32 : VOPC_32 <0x0000000d, "V_CMP_NEQ_F32", f32, COND_UNE>;
defm V_CMP_NLT_F32 : VOPC_32 <0x0000000e, "V_CMP_NLT_F32">;
defm V_CMP_TRU_F32 : VOPC_32 <0x0000000f, "V_CMP_TRU_F32">;
@@ -176,19 +181,19 @@ defm V_CMPX_TRU_F32 : VOPC_32 <0x0000001f, "V_CMPX_TRU_F32">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_F64 : VOPC_64 <0x00000020, "V_CMP_F_F64">;
-defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64">;
-defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64">;
-defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64">;
-defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64">;
+defm V_CMP_LT_F64 : VOPC_64 <0x00000021, "V_CMP_LT_F64", f64, COND_OLT>;
+defm V_CMP_EQ_F64 : VOPC_64 <0x00000022, "V_CMP_EQ_F64", f64, COND_OEQ>;
+defm V_CMP_LE_F64 : VOPC_64 <0x00000023, "V_CMP_LE_F64", f64, COND_OLE>;
+defm V_CMP_GT_F64 : VOPC_64 <0x00000024, "V_CMP_GT_F64", f64, COND_OGT>;
defm V_CMP_LG_F64 : VOPC_64 <0x00000025, "V_CMP_LG_F64">;
-defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64">;
-defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64">;
-defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64">;
+defm V_CMP_GE_F64 : VOPC_64 <0x00000026, "V_CMP_GE_F64", f64, COND_OGE>;
+defm V_CMP_O_F64 : VOPC_64 <0x00000027, "V_CMP_O_F64", f64, COND_O>;
+defm V_CMP_U_F64 : VOPC_64 <0x00000028, "V_CMP_U_F64", f64, COND_UO>;
defm V_CMP_NGE_F64 : VOPC_64 <0x00000029, "V_CMP_NGE_F64">;
defm V_CMP_NLG_F64 : VOPC_64 <0x0000002a, "V_CMP_NLG_F64">;
defm V_CMP_NGT_F64 : VOPC_64 <0x0000002b, "V_CMP_NGT_F64">;
defm V_CMP_NLE_F64 : VOPC_64 <0x0000002c, "V_CMP_NLE_F64">;
-defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64">;
+defm V_CMP_NEQ_F64 : VOPC_64 <0x0000002d, "V_CMP_NEQ_F64", f64, COND_UNE>;
defm V_CMP_NLT_F64 : VOPC_64 <0x0000002e, "V_CMP_NLT_F64">;
defm V_CMP_TRU_F64 : VOPC_64 <0x0000002f, "V_CMP_TRU_F64">;
@@ -290,12 +295,12 @@ defm V_CMPSX_TRU_F64 : VOPC_64 <0x0000007f, "V_CMPSX_TRU_F64">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_I32 : VOPC_32 <0x00000080, "V_CMP_F_I32">;
-defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", i32, COND_LT>;
+defm V_CMP_LT_I32 : VOPC_32 <0x00000081, "V_CMP_LT_I32", i32, COND_SLT>;
defm V_CMP_EQ_I32 : VOPC_32 <0x00000082, "V_CMP_EQ_I32", i32, COND_EQ>;
-defm V_CMP_LE_I32 : VOPC_32 <0x00000083, "V_CMP_LE_I32", i32, COND_LE>;
-defm V_CMP_GT_I32 : VOPC_32 <0x00000084, "V_CMP_GT_I32", i32, COND_GT>;
+defm V_CMP_LE_I32 : VOPC_32 <0x00000083, "V_CMP_LE_I32", i32, COND_SLE>;
+defm V_CMP_GT_I32 : VOPC_32 <0x00000084, "V_CMP_GT_I32", i32, COND_SGT>;
defm V_CMP_NE_I32 : VOPC_32 <0x00000085, "V_CMP_NE_I32", i32, COND_NE>;
-defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_GE>;
+defm V_CMP_GE_I32 : VOPC_32 <0x00000086, "V_CMP_GE_I32", i32, COND_SGE>;
defm V_CMP_T_I32 : VOPC_32 <0x00000087, "V_CMP_T_I32">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -312,12 +317,12 @@ defm V_CMPX_T_I32 : VOPC_32 <0x00000097, "V_CMPX_T_I32">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64">;
-defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64">;
-defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64">;
-defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64">;
-defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64">;
-defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64">;
-defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64">;
+defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64", i64, COND_SLT>;
+defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64", i64, COND_EQ>;
+defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64", i64, COND_SLE>;
+defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64", i64, COND_SGT>;
+defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64", i64, COND_NE>;
+defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64", i64, COND_SGE>;
defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -334,12 +339,12 @@ defm V_CMPX_T_I64 : VOPC_64 <0x000000b7, "V_CMPX_T_I64">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_U32 : VOPC_32 <0x000000c0, "V_CMP_F_U32">;
-defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32">;
-defm V_CMP_EQ_U32 : VOPC_32 <0x000000c2, "V_CMP_EQ_U32">;
-defm V_CMP_LE_U32 : VOPC_32 <0x000000c3, "V_CMP_LE_U32">;
-defm V_CMP_GT_U32 : VOPC_32 <0x000000c4, "V_CMP_GT_U32">;
-defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32">;
-defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32">;
+defm V_CMP_LT_U32 : VOPC_32 <0x000000c1, "V_CMP_LT_U32", i32, COND_ULT>;
+defm V_CMP_EQ_U32 : VOPC_32 <0x000000c2, "V_CMP_EQ_U32", i32, COND_EQ>;
+defm V_CMP_LE_U32 : VOPC_32 <0x000000c3, "V_CMP_LE_U32", i32, COND_ULE>;
+defm V_CMP_GT_U32 : VOPC_32 <0x000000c4, "V_CMP_GT_U32", i32, COND_UGT>;
+defm V_CMP_NE_U32 : VOPC_32 <0x000000c5, "V_CMP_NE_U32", i32, COND_NE>;
+defm V_CMP_GE_U32 : VOPC_32 <0x000000c6, "V_CMP_GE_U32", i32, COND_UGE>;
defm V_CMP_T_U32 : VOPC_32 <0x000000c7, "V_CMP_T_U32">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -356,12 +361,12 @@ defm V_CMPX_T_U32 : VOPC_32 <0x000000d7, "V_CMPX_T_U32">;
} // End hasSideEffects = 1, Defs = [EXEC]
defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64">;
-defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64">;
-defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64">;
-defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64">;
-defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64">;
-defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64">;
-defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64">;
+defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64", i64, COND_ULT>;
+defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64", i64, COND_EQ>;
+defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64", i64, COND_ULE>;
+defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64", i64, COND_UGT>;
+defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64", i64, COND_NE>;
+defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64", i64, COND_UGE>;
defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64">;
let hasSideEffects = 1, Defs = [EXEC] in {
@@ -391,32 +396,52 @@ defm V_CMPX_CLASS_F64 : VOPC_64 <0x000000b8, "V_CMPX_CLASS_F64">;
} // End isCompare = 1
+def DS_ADD_U32_RTN : DS_1A1D_RET <0x20, "DS_ADD_U32_RTN", VReg_32>;
+def DS_SUB_U32_RTN : DS_1A1D_RET <0x21, "DS_SUB_U32_RTN", VReg_32>;
+def DS_WRITE_B32 : DS_Store_Helper <0x0000000d, "DS_WRITE_B32", VReg_32>;
+def DS_WRITE_B8 : DS_Store_Helper <0x00000001e, "DS_WRITE_B8", VReg_32>;
+def DS_WRITE_B16 : DS_Store_Helper <0x00000001f, "DS_WRITE_B16", VReg_32>;
+def DS_READ_B32 : DS_Load_Helper <0x00000036, "DS_READ_B32", VReg_32>;
+def DS_READ_I8 : DS_Load_Helper <0x00000039, "DS_READ_I8", VReg_32>;
+def DS_READ_U8 : DS_Load_Helper <0x0000003a, "DS_READ_U8", VReg_32>;
+def DS_READ_I16 : DS_Load_Helper <0x0000003b, "DS_READ_I16", VReg_32>;
+def DS_READ_U16 : DS_Load_Helper <0x0000003c, "DS_READ_U16", VReg_32>;
+
//def BUFFER_LOAD_FORMAT_X : MUBUF_ <0x00000000, "BUFFER_LOAD_FORMAT_X", []>;
//def BUFFER_LOAD_FORMAT_XY : MUBUF_ <0x00000001, "BUFFER_LOAD_FORMAT_XY", []>;
//def BUFFER_LOAD_FORMAT_XYZ : MUBUF_ <0x00000002, "BUFFER_LOAD_FORMAT_XYZ", []>;
-def BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>;
+defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMAT_XYZW", VReg_128>;
//def BUFFER_STORE_FORMAT_X : MUBUF_ <0x00000004, "BUFFER_STORE_FORMAT_X", []>;
//def BUFFER_STORE_FORMAT_XY : MUBUF_ <0x00000005, "BUFFER_STORE_FORMAT_XY", []>;
//def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>;
//def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>;
-//def BUFFER_LOAD_UBYTE : MUBUF_ <0x00000008, "BUFFER_LOAD_UBYTE", []>;
-//def BUFFER_LOAD_SBYTE : MUBUF_ <0x00000009, "BUFFER_LOAD_SBYTE", []>;
-//def BUFFER_LOAD_USHORT : MUBUF_ <0x0000000a, "BUFFER_LOAD_USHORT", []>;
-//def BUFFER_LOAD_SSHORT : MUBUF_ <0x0000000b, "BUFFER_LOAD_SSHORT", []>;
-def BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>;
-def BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>;
-def BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>;
-//def BUFFER_STORE_BYTE : MUBUF_ <0x00000018, "BUFFER_STORE_BYTE", []>;
-//def BUFFER_STORE_SHORT : MUBUF_ <0x0000001a, "BUFFER_STORE_SHORT", []>;
+defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <0x00000008, "BUFFER_LOAD_UBYTE", VReg_32>;
+defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <0x00000009, "BUFFER_LOAD_SBYTE", VReg_32>;
+defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <0x0000000a, "BUFFER_LOAD_USHORT", VReg_32>;
+defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32>;
+defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>;
+defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>;
+defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>;
+
+def BUFFER_STORE_BYTE : MUBUF_Store_Helper <
+ 0x00000018, "BUFFER_STORE_BYTE", VReg_32
+>;
+
+def BUFFER_STORE_SHORT : MUBUF_Store_Helper <
+ 0x0000001a, "BUFFER_STORE_SHORT", VReg_32
+>;
def BUFFER_STORE_DWORD : MUBUF_Store_Helper <
- 0x0000001c, "BUFFER_STORE_DWORD", VReg_32, i32
+ 0x0000001c, "BUFFER_STORE_DWORD", VReg_32
>;
def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
- 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64, i64
+ 0x0000001d, "BUFFER_STORE_DWORDX2", VReg_64
+>;
+
+def BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
+ 0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128
>;
-//def BUFFER_STORE_DWORDX4 : MUBUF_DWORDX4 <0x0000001e, "BUFFER_STORE_DWORDX4", []>;
//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>;
//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>;
//def BUFFER_ATOMIC_ADD : MUBUF_ <0x00000032, "BUFFER_ATOMIC_ADD", []>;
@@ -457,21 +482,24 @@ def BUFFER_STORE_DWORDX2 : MUBUF_Store_Helper <
//def TBUFFER_LOAD_FORMAT_XY : MTBUF_ <0x00000001, "TBUFFER_LOAD_FORMAT_XY", []>;
//def TBUFFER_LOAD_FORMAT_XYZ : MTBUF_ <0x00000002, "TBUFFER_LOAD_FORMAT_XYZ", []>;
def TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Load_Helper <0x00000003, "TBUFFER_LOAD_FORMAT_XYZW", VReg_128>;
-//def TBUFFER_STORE_FORMAT_X : MTBUF_ <0x00000004, "TBUFFER_STORE_FORMAT_X", []>;
-//def TBUFFER_STORE_FORMAT_XY : MTBUF_ <0x00000005, "TBUFFER_STORE_FORMAT_XY", []>;
-//def TBUFFER_STORE_FORMAT_XYZ : MTBUF_ <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", []>;
-//def TBUFFER_STORE_FORMAT_XYZW : MTBUF_ <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", []>;
+def TBUFFER_STORE_FORMAT_X : MTBUF_Store_Helper <0x00000004, "TBUFFER_STORE_FORMAT_X", VReg_32>;
+def TBUFFER_STORE_FORMAT_XY : MTBUF_Store_Helper <0x00000005, "TBUFFER_STORE_FORMAT_XY", VReg_64>;
+def TBUFFER_STORE_FORMAT_XYZ : MTBUF_Store_Helper <0x00000006, "TBUFFER_STORE_FORMAT_XYZ", VReg_128>;
+def TBUFFER_STORE_FORMAT_XYZW : MTBUF_Store_Helper <0x00000007, "TBUFFER_STORE_FORMAT_XYZW", VReg_128>;
let mayLoad = 1 in {
-defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SReg_32>;
+// We are using the SGPR_32 and not the SReg_32 register class for 32-bit
+// SMRD instructions, because the SGPR_32 register class does not include M0
+// and writing to M0 from an SMRD instruction will hang the GPU.
+defm S_LOAD_DWORD : SMRD_Helper <0x00, "S_LOAD_DWORD", SReg_64, SGPR_32>;
defm S_LOAD_DWORDX2 : SMRD_Helper <0x01, "S_LOAD_DWORDX2", SReg_64, SReg_64>;
defm S_LOAD_DWORDX4 : SMRD_Helper <0x02, "S_LOAD_DWORDX4", SReg_64, SReg_128>;
defm S_LOAD_DWORDX8 : SMRD_Helper <0x03, "S_LOAD_DWORDX8", SReg_64, SReg_256>;
defm S_LOAD_DWORDX16 : SMRD_Helper <0x04, "S_LOAD_DWORDX16", SReg_64, SReg_512>;
defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
- 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SReg_32
+ 0x08, "S_BUFFER_LOAD_DWORD", SReg_128, SGPR_32
>;
defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
@@ -494,8 +522,8 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
//def S_MEMTIME : SMRD_ <0x0000001e, "S_MEMTIME", []>;
//def S_DCACHE_INV : SMRD_ <0x0000001f, "S_DCACHE_INV", []>;
-//def IMAGE_LOAD : MIMG_NoPattern_ <"IMAGE_LOAD", 0x00000000>;
-def IMAGE_LOAD_MIP : MIMG_NoSampler_Helper <0x00000001, "IMAGE_LOAD_MIP">;
+defm IMAGE_LOAD : MIMG_NoSampler <0x00000000, "IMAGE_LOAD">;
+defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "IMAGE_LOAD_MIP">;
//def IMAGE_LOAD_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_PCK", 0x00000002>;
//def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"IMAGE_LOAD_PCK_SGN", 0x00000003>;
//def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"IMAGE_LOAD_MIP_PCK", 0x00000004>;
@@ -504,7 +532,7 @@ def IMAGE_LOAD_MIP : MIMG_NoSampler_Helper <0x00000001, "IMAGE_LOAD_MIP">;
//def IMAGE_STORE_MIP : MIMG_NoPattern_ <"IMAGE_STORE_MIP", 0x00000009>;
//def IMAGE_STORE_PCK : MIMG_NoPattern_ <"IMAGE_STORE_PCK", 0x0000000a>;
//def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"IMAGE_STORE_MIP_PCK", 0x0000000b>;
-def IMAGE_GET_RESINFO : MIMG_NoSampler_Helper <0x0000000e, "IMAGE_GET_RESINFO">;
+defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "IMAGE_GET_RESINFO">;
//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_SWAP", 0x0000000f>;
//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_CMPSWAP", 0x00000010>;
//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"IMAGE_ATOMIC_ADD", 0x00000011>;
@@ -522,20 +550,20 @@ def IMAGE_GET_RESINFO : MIMG_NoSampler_Helper <0x0000000e, "IMAGE_GET_RESINFO">;
//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"IMAGE_ATOMIC_FCMPSWAP", 0x0000001d>;
//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMIN", 0x0000001e>;
//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"IMAGE_ATOMIC_FMAX", 0x0000001f>;
-def IMAGE_SAMPLE : MIMG_Sampler_Helper <0x00000020, "IMAGE_SAMPLE">;
+defm IMAGE_SAMPLE : MIMG_Sampler <0x00000020, "IMAGE_SAMPLE">;
//def IMAGE_SAMPLE_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_CL", 0x00000021>;
-def IMAGE_SAMPLE_D : MIMG_Sampler_Helper <0x00000022, "IMAGE_SAMPLE_D">;
+defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "IMAGE_SAMPLE_D">;
//def IMAGE_SAMPLE_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_D_CL", 0x00000023>;
-def IMAGE_SAMPLE_L : MIMG_Sampler_Helper <0x00000024, "IMAGE_SAMPLE_L">;
-def IMAGE_SAMPLE_B : MIMG_Sampler_Helper <0x00000025, "IMAGE_SAMPLE_B">;
+defm IMAGE_SAMPLE_L : MIMG_Sampler <0x00000024, "IMAGE_SAMPLE_L">;
+defm IMAGE_SAMPLE_B : MIMG_Sampler <0x00000025, "IMAGE_SAMPLE_B">;
//def IMAGE_SAMPLE_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_B_CL", 0x00000026>;
//def IMAGE_SAMPLE_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_LZ", 0x00000027>;
-def IMAGE_SAMPLE_C : MIMG_Sampler_Helper <0x00000028, "IMAGE_SAMPLE_C">;
+defm IMAGE_SAMPLE_C : MIMG_Sampler <0x00000028, "IMAGE_SAMPLE_C">;
//def IMAGE_SAMPLE_C_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_CL", 0x00000029>;
-//def IMAGE_SAMPLE_C_D : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D", 0x0000002a>;
+defm IMAGE_SAMPLE_C_D : MIMG_Sampler <0x0000002a, "IMAGE_SAMPLE_C_D">;
//def IMAGE_SAMPLE_C_D_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_D_CL", 0x0000002b>;
-def IMAGE_SAMPLE_C_L : MIMG_Sampler_Helper <0x0000002c, "IMAGE_SAMPLE_C_L">;
-def IMAGE_SAMPLE_C_B : MIMG_Sampler_Helper <0x0000002d, "IMAGE_SAMPLE_C_B">;
+defm IMAGE_SAMPLE_C_L : MIMG_Sampler <0x0000002c, "IMAGE_SAMPLE_C_L">;
+defm IMAGE_SAMPLE_C_B : MIMG_Sampler <0x0000002d, "IMAGE_SAMPLE_C_B">;
//def IMAGE_SAMPLE_C_B_CL : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_B_CL", 0x0000002e>;
//def IMAGE_SAMPLE_C_LZ : MIMG_NoPattern_ <"IMAGE_SAMPLE_C_LZ", 0x0000002f>;
//def IMAGE_SAMPLE_O : MIMG_NoPattern_ <"IMAGE_SAMPLE_O", 0x00000030>;
@@ -597,15 +625,21 @@ defm V_MOV_B32 : VOP1_32 <0x00000001, "V_MOV_B32", []>;
} // End neverHasSideEffects = 1, isMoveImm = 1
defm V_READFIRSTLANE_B32 : VOP1_32 <0x00000002, "V_READFIRSTLANE_B32", []>;
-//defm V_CVT_I32_F64 : VOP1_32 <0x00000003, "V_CVT_I32_F64", []>;
-//defm V_CVT_F64_I32 : VOP1_64 <0x00000004, "V_CVT_F64_I32", []>;
+defm V_CVT_I32_F64 : VOP1_32_64 <0x00000003, "V_CVT_I32_F64",
+ [(set i32:$dst, (fp_to_sint f64:$src0))]
+>;
+defm V_CVT_F64_I32 : VOP1_64_32 <0x00000004, "V_CVT_F64_I32",
+ [(set f64:$dst, (sint_to_fp i32:$src0))]
+>;
defm V_CVT_F32_I32 : VOP1_32 <0x00000005, "V_CVT_F32_I32",
[(set f32:$dst, (sint_to_fp i32:$src0))]
>;
defm V_CVT_F32_U32 : VOP1_32 <0x00000006, "V_CVT_F32_U32",
[(set f32:$dst, (uint_to_fp i32:$src0))]
>;
-defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32", []>;
+defm V_CVT_U32_F32 : VOP1_32 <0x00000007, "V_CVT_U32_F32",
+ [(set i32:$dst, (fp_to_uint f32:$src0))]
+>;
defm V_CVT_I32_F32 : VOP1_32 <0x00000008, "V_CVT_I32_F32",
[(set i32:$dst, (fp_to_sint f32:$src0))]
>;
@@ -615,8 +649,12 @@ defm V_MOV_FED_B32 : VOP1_32 <0x00000009, "V_MOV_FED_B32", []>;
//defm V_CVT_RPI_I32_F32 : VOP1_32 <0x0000000c, "V_CVT_RPI_I32_F32", []>;
//defm V_CVT_FLR_I32_F32 : VOP1_32 <0x0000000d, "V_CVT_FLR_I32_F32", []>;
//defm V_CVT_OFF_F32_I4 : VOP1_32 <0x0000000e, "V_CVT_OFF_F32_I4", []>;
-//defm V_CVT_F32_F64 : VOP1_32 <0x0000000f, "V_CVT_F32_F64", []>;
-//defm V_CVT_F64_F32 : VOP1_64 <0x00000010, "V_CVT_F64_F32", []>;
+defm V_CVT_F32_F64 : VOP1_32_64 <0x0000000f, "V_CVT_F32_F64",
+ [(set f32:$dst, (fround f64:$src0))]
+>;
+defm V_CVT_F64_F32 : VOP1_64_32 <0x00000010, "V_CVT_F64_F32",
+ [(set f64:$dst, (fextend f32:$src0))]
+>;
//defm V_CVT_F32_UBYTE0 : VOP1_32 <0x00000011, "V_CVT_F32_UBYTE0", []>;
//defm V_CVT_F32_UBYTE1 : VOP1_32 <0x00000012, "V_CVT_F32_UBYTE1", []>;
//defm V_CVT_F32_UBYTE2 : VOP1_32 <0x00000013, "V_CVT_F32_UBYTE2", []>;
@@ -657,12 +695,18 @@ defm V_RSQ_LEGACY_F32 : VOP1_32 <
[(set f32:$dst, (int_AMDGPU_rsq f32:$src0))]
>;
defm V_RSQ_F32 : VOP1_32 <0x0000002e, "V_RSQ_F32", []>;
-defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64", []>;
+defm V_RCP_F64 : VOP1_64 <0x0000002f, "V_RCP_F64",
+ [(set f64:$dst, (fdiv FP_ONE, f64:$src0))]
+>;
defm V_RCP_CLAMP_F64 : VOP1_64 <0x00000030, "V_RCP_CLAMP_F64", []>;
defm V_RSQ_F64 : VOP1_64 <0x00000031, "V_RSQ_F64", []>;
defm V_RSQ_CLAMP_F64 : VOP1_64 <0x00000032, "V_RSQ_CLAMP_F64", []>;
-defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32", []>;
-defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64", []>;
+defm V_SQRT_F32 : VOP1_32 <0x00000033, "V_SQRT_F32",
+ [(set f32:$dst, (fsqrt f32:$src0))]
+>;
+defm V_SQRT_F64 : VOP1_64 <0x00000034, "V_SQRT_F64",
+ [(set f64:$dst, (fsqrt f64:$src0))]
+>;
defm V_SIN_F32 : VOP1_32 <0x00000035, "V_SIN_F32", []>;
defm V_COS_F32 : VOP1_32 <0x00000036, "V_COS_F32", []>;
defm V_NOT_B32 : VOP1_32 <0x00000037, "V_NOT_B32", []>;
@@ -768,9 +812,18 @@ def S_CBRANCH_EXECNZ : SOPP <
} // End isBranch = 1
} // End isTerminator = 1
-//def S_BARRIER : SOPP_ <0x0000000a, "S_BARRIER", []>;
let hasSideEffects = 1 in {
-def S_WAITCNT : SOPP <0x0000000c, (ins i32imm:$simm16), "S_WAITCNT $simm16",
+def S_BARRIER : SOPP <0x0000000a, (ins), "S_BARRIER",
+ [(int_AMDGPU_barrier_local)]
+> {
+ let SIMM16 = 0;
+ let isBarrier = 1;
+ let hasCtrlDep = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "S_WAITCNT $simm16",
[]
>;
} // End hasSideEffects
@@ -806,6 +859,23 @@ def : Pat <
(V_CNDMASK_B32_e64 $src0, $src1, $src2)
>;
+def : Pat <
+ (i32 (trunc i64:$val)),
+ (EXTRACT_SUBREG $val, sub0)
+>;
+
+// Use two V_CNDMASK_B32_e64 instructions to lower an f64 select.
+def : Pat <
+ (f64 (select i1:$src2, f64:$src1, f64:$src0)),
+ (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub0),
+ (EXTRACT_SUBREG $src1, sub0),
+ $src2), sub0),
+ (V_CNDMASK_B32_e64 (EXTRACT_SUBREG $src0, sub1),
+ (EXTRACT_SUBREG $src1, sub1),
+ $src2), sub1)
+>;
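The pattern above lowers a 64-bit select as one 32-bit conditional move per
half-register. A minimal C++ sketch of the value computed (helper name is
illustrative, not from the patch):

  #include <cstdint>
  // cc ? src1 : src0, done separately on sub0 (low) and sub1 (high).
  uint64_t selectF64Bits(bool cc, uint64_t src1, uint64_t src0) {
    uint32_t lo = cc ? uint32_t(src1) : uint32_t(src0);             // sub0
    uint32_t hi = cc ? uint32_t(src1 >> 32) : uint32_t(src0 >> 32); // sub1
    return (uint64_t(hi) << 32) | lo;
  }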
+
defm V_READLANE_B32 : VOP2_32 <0x00000001, "V_READLANE_B32", []>;
defm V_WRITELANE_B32 : VOP2_32 <0x00000002, "V_WRITELANE_B32", []>;
@@ -833,14 +903,16 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
[(set f32:$dst, (fmul f32:$src0, f32:$src1))]
>;
-} // End isCommutable = 1
-//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
+defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
+ [(set i32:$dst, (mul I24:$src0, I24:$src1))]
+>;
//defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
-//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
+defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
+ [(set i32:$dst, (mul U24:$src0, U24:$src1))]
+>;
//defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
-let isCommutable = 1 in {
defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
[(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))]
@@ -875,9 +947,13 @@ defm V_ASHR_I32 : VOP2_32 <0x00000017, "V_ASHR_I32",
>;
defm V_ASHRREV_I32 : VOP2_32 <0x00000018, "V_ASHRREV_I32", [], "V_ASHR_I32">;
+let hasPostISelHook = 1 in {
+
defm V_LSHL_B32 : VOP2_32 <0x00000019, "V_LSHL_B32",
[(set i32:$dst, (shl i32:$src0, i32:$src1))]
>;
+
+}
defm V_LSHLREV_B32 : VOP2_32 <0x0000001a, "V_LSHLREV_B32", [], "V_LSHL_B32">;
defm V_AND_B32 : VOP2_32 <0x0000001b, "V_AND_B32",
@@ -897,20 +973,17 @@ defm V_MAC_F32 : VOP2_32 <0x0000001f, "V_MAC_F32", []>;
defm V_MADMK_F32 : VOP2_32 <0x00000020, "V_MADMK_F32", []>;
defm V_MADAK_F32 : VOP2_32 <0x00000021, "V_MADAK_F32", []>;
//defm V_BCNT_U32_B32 : VOP2_32 <0x00000022, "V_BCNT_U32_B32", []>;
-//defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
-//defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
+defm V_MBCNT_LO_U32_B32 : VOP2_32 <0x00000023, "V_MBCNT_LO_U32_B32", []>;
+defm V_MBCNT_HI_U32_B32 : VOP2_32 <0x00000024, "V_MBCNT_HI_U32_B32", []>;
let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
-defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32",
- [(set i32:$dst, (add (i32 VSrc_32:$src0), (i32 VReg_32:$src1)))]
->;
-
-defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32",
- [(set i32:$dst, (sub i32:$src0, i32:$src1))]
->;
+// No patterns so that the scalar instructions are always selected.
+// The scalar versions will be replaced with the vector ones later if needed.
+defm V_ADD_I32 : VOP2b_32 <0x00000025, "V_ADD_I32", []>;
+defm V_SUB_I32 : VOP2b_32 <0x00000026, "V_SUB_I32", []>;
defm V_SUBREV_I32 : VOP2b_32 <0x00000027, "V_SUBREV_I32", [], "V_SUB_I32">;
-let Uses = [VCC] in { // Carry-out comes from VCC
+let Uses = [VCC] in { // Carry-in comes from VCC
defm V_ADDC_U32 : VOP2b_32 <0x00000028, "V_ADDC_U32", []>;
defm V_SUBB_U32 : VOP2b_32 <0x00000029, "V_SUBB_U32", []>;
defm V_SUBBREV_U32 : VOP2b_32 <0x0000002a, "V_SUBBREV_U32", [], "V_SUBB_U32">;
@@ -948,8 +1021,12 @@ let neverHasSideEffects = 1 in {
def V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
def V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", []>;
-//def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", []>;
-//def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", []>;
+def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24",
+ [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))]
+>;
+def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24",
+ [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))]
+>;
} // End neverHasSideEffects
def V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>;
@@ -960,10 +1037,16 @@ def V_BFE_U32 : VOP3_32 <0x00000148, "V_BFE_U32", []>;
def V_BFE_I32 : VOP3_32 <0x00000149, "V_BFE_I32", []>;
def V_BFI_B32 : VOP3_32 <0x0000014a, "V_BFI_B32", []>;
defm : BFIPatterns <V_BFI_B32>;
-def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32", []>;
-def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64", []>;
+def V_FMA_F32 : VOP3_32 <0x0000014b, "V_FMA_F32",
+ [(set f32:$dst, (fma f32:$src0, f32:$src1, f32:$src2))]
+>;
+def V_FMA_F64 : VOP3_64 <0x0000014c, "V_FMA_F64",
+ [(set f64:$dst, (fma f64:$src0, f64:$src1, f64:$src2))]
+>;
//def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>;
def V_ALIGNBIT_B32 : VOP3_32 <0x0000014e, "V_ALIGNBIT_B32", []>;
+def : ROTRPattern <V_ALIGNBIT_B32>;
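ROTRPattern works because V_ALIGNBIT_B32 selects a 32-bit window from the
64-bit concatenation of its first two operands; feeding the same value to both
halves yields a rotate-right. A hedged C++ sketch of that semantics (names
are illustrative):

  #include <cstdint>
  uint32_t alignbit(uint32_t hi, uint32_t lo, uint32_t s) {
    uint64_t cat = (uint64_t(hi) << 32) | lo;
    return uint32_t(cat >> (s & 31)); // shift amount taken mod 32
  }
  uint32_t rotr32(uint32_t x, uint32_t s) { return alignbit(x, x, s); }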
+
def V_ALIGNBYTE_B32 : VOP3_32 <0x0000014f, "V_ALIGNBYTE_B32", []>;
def V_MULLIT_F32 : VOP3_32 <0x00000150, "V_MULLIT_F32", []>;
////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>;
@@ -982,13 +1065,36 @@ def V_SAD_U32 : VOP3_32 <0x0000015d, "V_SAD_U32", []>;
////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>;
def V_DIV_FIXUP_F32 : VOP3_32 <0x0000015f, "V_DIV_FIXUP_F32", []>;
def V_DIV_FIXUP_F64 : VOP3_64 <0x00000160, "V_DIV_FIXUP_F64", []>;
-def V_LSHL_B64 : VOP3_64 <0x00000161, "V_LSHL_B64", []>;
-def V_LSHR_B64 : VOP3_64 <0x00000162, "V_LSHR_B64", []>;
-def V_ASHR_I64 : VOP3_64 <0x00000163, "V_ASHR_I64", []>;
+
+def V_LSHL_B64 : VOP3_64_Shift <0x00000161, "V_LSHL_B64",
+ [(set i64:$dst, (shl i64:$src0, i32:$src1))]
+>;
+def V_LSHR_B64 : VOP3_64_Shift <0x00000162, "V_LSHR_B64",
+ [(set i64:$dst, (srl i64:$src0, i32:$src1))]
+>;
+def V_ASHR_I64 : VOP3_64_Shift <0x00000163, "V_ASHR_I64",
+ [(set i64:$dst, (sra i64:$src0, i32:$src1))]
+>;
+
+let isCommutable = 1 in {
+
def V_ADD_F64 : VOP3_64 <0x00000164, "V_ADD_F64", []>;
def V_MUL_F64 : VOP3_64 <0x00000165, "V_MUL_F64", []>;
def V_MIN_F64 : VOP3_64 <0x00000166, "V_MIN_F64", []>;
def V_MAX_F64 : VOP3_64 <0x00000167, "V_MAX_F64", []>;
+
+} // isCommutable = 1
+
+def : Pat <
+ (fadd f64:$src0, f64:$src1),
+ (V_ADD_F64 $src0, $src1, (i64 0))
+>;
+
+def : Pat <
+ (fmul f64:$src0, f64:$src1),
+ (V_MUL_F64 $src0, $src1, (i64 0))
+>;
+
def V_LDEXP_F64 : VOP3_64 <0x00000168, "V_LDEXP_F64", []>;
let isCommutable = 1 in {
@@ -1023,12 +1129,31 @@ def V_DIV_FMAS_F64 : VOP3_64 <0x00000170, "V_DIV_FMAS_F64", []>;
//def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>;
//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>;
def V_TRIG_PREOP_F64 : VOP3_64 <0x00000174, "V_TRIG_PREOP_F64", []>;
+
+let Defs = [SCC] in { // Carry out goes to SCC
+let isCommutable = 1 in {
def S_ADD_U32 : SOP2_32 <0x00000000, "S_ADD_U32", []>;
+def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32",
+ [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
+>;
+} // End isCommutable = 1
+
def S_SUB_U32 : SOP2_32 <0x00000001, "S_SUB_U32", []>;
-def S_ADD_I32 : SOP2_32 <0x00000002, "S_ADD_I32", []>;
-def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32", []>;
-def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32", []>;
-def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32", []>;
+def S_SUB_I32 : SOP2_32 <0x00000003, "S_SUB_I32",
+ [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
+>;
+
+let Uses = [SCC] in { // Carry in comes from SCC
+let isCommutable = 1 in {
+def S_ADDC_U32 : SOP2_32 <0x00000004, "S_ADDC_U32",
+ [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+} // End isCommutable = 1
+
+def S_SUBB_U32 : SOP2_32 <0x00000005, "S_SUBB_U32",
+ [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+} // End Uses = [SCC]
+} // End Defs = [SCC]
+
def S_MIN_I32 : SOP2_32 <0x00000006, "S_MIN_I32", []>;
def S_MIN_U32 : SOP2_32 <0x00000007, "S_MIN_U32", []>;
def S_MAX_I32 : SOP2_32 <0x00000008, "S_MAX_I32", []>;
@@ -1060,7 +1185,9 @@ def : Pat <
(S_OR_B64 $src0, $src1)
>;
def S_XOR_B32 : SOP2_32 <0x00000012, "S_XOR_B32", []>;
-def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64", []>;
+def S_XOR_B64 : SOP2_64 <0x00000013, "S_XOR_B64",
+ [(set i1:$dst, (xor i1:$src0, i1:$src1))]
+>;
def S_ANDN2_B32 : SOP2_32 <0x00000014, "S_ANDN2_B32", []>;
def S_ANDN2_B64 : SOP2_64 <0x00000015, "S_ANDN2_B64", []>;
def S_ORN2_B32 : SOP2_32 <0x00000016, "S_ORN2_B32", []>;
@@ -1071,12 +1198,31 @@ def S_NOR_B32 : SOP2_32 <0x0000001a, "S_NOR_B32", []>;
def S_NOR_B64 : SOP2_64 <0x0000001b, "S_NOR_B64", []>;
def S_XNOR_B32 : SOP2_32 <0x0000001c, "S_XNOR_B32", []>;
def S_XNOR_B64 : SOP2_64 <0x0000001d, "S_XNOR_B64", []>;
-def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32", []>;
-def S_LSHL_B64 : SOP2_64 <0x0000001f, "S_LSHL_B64", []>;
-def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32", []>;
-def S_LSHR_B64 : SOP2_64 <0x00000021, "S_LSHR_B64", []>;
-def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32", []>;
-def S_ASHR_I64 : SOP2_64 <0x00000023, "S_ASHR_I64", []>;
+
+// Use added complexity so these patterns are preferred to the VALU patterns.
+let AddedComplexity = 1 in {
+
+def S_LSHL_B32 : SOP2_32 <0x0000001e, "S_LSHL_B32",
+ [(set i32:$dst, (shl i32:$src0, i32:$src1))]
+>;
+def S_LSHL_B64 : SOP2_SHIFT_64 <0x0000001f, "S_LSHL_B64",
+ [(set i64:$dst, (shl i64:$src0, i32:$src1))]
+>;
+def S_LSHR_B32 : SOP2_32 <0x00000020, "S_LSHR_B32",
+ [(set i32:$dst, (srl i32:$src0, i32:$src1))]
+>;
+def S_LSHR_B64 : SOP2_SHIFT_64 <0x00000021, "S_LSHR_B64",
+ [(set i64:$dst, (srl i64:$src0, i32:$src1))]
+>;
+def S_ASHR_I32 : SOP2_32 <0x00000022, "S_ASHR_I32",
+ [(set i32:$dst, (sra i32:$src0, i32:$src1))]
+>;
+def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "S_ASHR_I64",
+ [(set i64:$dst, (sra i64:$src0, i32:$src1))]
+>;
+
+} // End AddedComplexity = 1
+
def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
@@ -1096,7 +1242,7 @@ def LOAD_CONST : AMDGPUShaderInst <
[(set GPRF32:$dst, (int_AMDGPU_load_const imm:$src))]
>;
-// SI Psuedo instructions. These are used by the CFG structurizer pass
+// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.
let mayLoad = 1, mayStore = 1, hasSideEffects = 1,
@@ -1169,6 +1315,36 @@ def SI_KILL : InstSI <
let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
+//defm SI_ : RegisterLoadStore <VReg_32, FRAMEri64, ADDRIndirect>;
+
+let UseNamedOperandTable = 1 in {
+
+def SI_RegisterLoad : AMDGPUShaderInst <
+ (outs VReg_32:$dst, SReg_64:$temp),
+ (ins FRAMEri64:$addr, i32imm:$chan),
+ "", []
+> {
+ let isRegisterLoad = 1;
+ let mayLoad = 1;
+}
+
+class SIRegStore<dag outs> : AMDGPUShaderInst <
+ outs,
+ (ins VReg_32:$val, FRAMEri64:$addr, i32imm:$chan),
+ "", []
+> {
+ let isRegisterStore = 1;
+ let mayStore = 1;
+}
+
+let usesCustomInserter = 1 in {
+def SI_RegisterStorePseudo : SIRegStore<(outs)>;
+} // End usesCustomInserter = 1
+def SI_RegisterStore : SIRegStore<(outs SReg_64:$temp)>;
+
+
+} // End UseNamedOperandTable = 1
+
def SI_INDIRECT_SRC : InstSI <
(outs VReg_32:$dst, SReg_64:$temp),
(ins unknown:$src, VSrc_32:$idx, i32imm:$off),
@@ -1185,6 +1361,7 @@ class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
let Constraints = "$src = $dst";
}
+def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VReg_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
@@ -1192,6 +1369,25 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
} // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0]
+let usesCustomInserter = 1 in {
+
+// This pseudo instruction takes a pointer as input and outputs a resource
+// constant that can be used with the ADDR64 MUBUF instructions.
+def SI_ADDR64_RSRC : InstSI <
+ (outs SReg_128:$srsrc),
+ (ins SReg_64:$ptr),
+ "", []
+>;
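A rough sketch of the resource constant the custom inserter is expected to
build from the 64-bit pointer; the exact descriptor word layout here is an
assumption for illustration, not taken from this patch:

  #include <cstdint>
  struct Rsrc128 { uint32_t w[4]; };  // one 128-bit SReg_128 value
  Rsrc128 makeAddr64Rsrc(uint64_t ptr) {
    Rsrc128 r;
    r.w[0] = uint32_t(ptr);        // base address, low half
    r.w[1] = uint32_t(ptr >> 32);  // base address, high half
    r.w[2] = 0xffffffffu;          // num_records: effectively unbounded (assumed)
    r.w[3] = 0;                    // default stride/format bits (assumed)
    return r;
  }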
+
+def V_SUB_F64 : InstSI <
+ (outs VReg_64:$dst),
+ (ins VReg_64:$src0, VReg_64:$src1),
+ "V_SUB_F64 $dst, $src0, $src1",
+ []
+>;
+
+} // end usesCustomInserter
+
} // end IsCodeGenOnly, isPseudo
def : Pat<
@@ -1206,10 +1402,8 @@ def : Pat <
/* int_SI_vs_load_input */
def : Pat<
- (int_SI_vs_load_input v16i8:$tlst, IMM12bit:$attr_offset,
- i32:$buf_idx_vgpr),
- (BUFFER_LOAD_FORMAT_XYZW imm:$attr_offset, 0, 1, 0, 0, 0,
- $buf_idx_vgpr, $tlst, 0, 0, 0)
+ (SIload_input i128:$tlst, IMM12bit:$attr_offset, i32:$buf_idx_vgpr),
+ (BUFFER_LOAD_FORMAT_XYZW_IDXEN $tlst, $buf_idx_vgpr, imm:$attr_offset)
>;
/* int_SI_export */
@@ -1220,66 +1414,94 @@ def : Pat <
$src0, $src1, $src2, $src3)
>;
+def : Pat <
+ (f64 (fsub f64:$src0, f64:$src1)),
+ (V_SUB_F64 $src0, $src1)
+>;
+
/********** ======================= **********/
/********** Image sampling patterns **********/
/********** ======================= **********/
-/* int_SI_sample for simple 1D texture lookup */
+/* SIsample for simple 1D texture lookup */
def : Pat <
- (int_SI_sample v1i32:$addr, v32i8:$rsrc, v16i8:$sampler, imm),
- (IMAGE_SAMPLE 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (SIsample i32:$addr, v32i8:$rsrc, i128:$sampler, imm),
+ (IMAGE_SAMPLE_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SamplePattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, imm),
+class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, imm),
(opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleRectPattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_RECT),
+class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_RECT),
(opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleArrayPattern<Intrinsic name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_ARRAY),
+class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_ARRAY),
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleShadowPattern<Intrinsic name, MIMG opcode,
+class SampleShadowPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_SHADOW),
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_SHADOW),
(opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-class SampleShadowArrayPattern<Intrinsic name, MIMG opcode,
+class SampleShadowArrayPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v16i8:$sampler, TEX_SHADOW_ARRAY),
+ (name vt:$addr, v32i8:$rsrc, i128:$sampler, TEX_SHADOW_ARRAY),
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
>;
-/* int_SI_sample* for texture lookups consuming more address parameters */
-multiclass SamplePatterns<ValueType addr_type> {
- def : SamplePattern <int_SI_sample, IMAGE_SAMPLE, addr_type>;
- def : SampleRectPattern <int_SI_sample, IMAGE_SAMPLE, addr_type>;
- def : SampleArrayPattern <int_SI_sample, IMAGE_SAMPLE, addr_type>;
- def : SampleShadowPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_type>;
- def : SampleShadowArrayPattern <int_SI_sample, IMAGE_SAMPLE_C, addr_type>;
-
- def : SamplePattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_type>;
- def : SampleArrayPattern <int_SI_samplel, IMAGE_SAMPLE_L, addr_type>;
- def : SampleShadowPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_type>;
- def : SampleShadowArrayPattern <int_SI_samplel, IMAGE_SAMPLE_C_L, addr_type>;
-
- def : SamplePattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>;
- def : SampleArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_B, addr_type>;
- def : SampleShadowPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>;
- def : SampleShadowArrayPattern <int_SI_sampleb, IMAGE_SAMPLE_C_B, addr_type>;
+/* SIsample* for texture lookups consuming more address parameters */
+multiclass SamplePatterns<MIMG sample, MIMG sample_c, MIMG sample_l,
+ MIMG sample_c_l, MIMG sample_b, MIMG sample_c_b,
+                          MIMG sample_d, MIMG sample_c_d, ValueType addr_type> {
+ def : SamplePattern <SIsample, sample, addr_type>;
+ def : SampleRectPattern <SIsample, sample, addr_type>;
+ def : SampleArrayPattern <SIsample, sample, addr_type>;
+ def : SampleShadowPattern <SIsample, sample_c, addr_type>;
+ def : SampleShadowArrayPattern <SIsample, sample_c, addr_type>;
+
+ def : SamplePattern <SIsamplel, sample_l, addr_type>;
+ def : SampleArrayPattern <SIsamplel, sample_l, addr_type>;
+ def : SampleShadowPattern <SIsamplel, sample_c_l, addr_type>;
+ def : SampleShadowArrayPattern <SIsamplel, sample_c_l, addr_type>;
+
+ def : SamplePattern <SIsampleb, sample_b, addr_type>;
+ def : SampleArrayPattern <SIsampleb, sample_b, addr_type>;
+ def : SampleShadowPattern <SIsampleb, sample_c_b, addr_type>;
+ def : SampleShadowArrayPattern <SIsampleb, sample_c_b, addr_type>;
+
+ def : SamplePattern <SIsampled, sample_d, addr_type>;
+ def : SampleArrayPattern <SIsampled, sample_d, addr_type>;
+ def : SampleShadowPattern <SIsampled, sample_c_d, addr_type>;
+ def : SampleShadowArrayPattern <SIsampled, sample_c_d, addr_type>;
}
-defm : SamplePatterns<v2i32>;
-defm : SamplePatterns<v4i32>;
-defm : SamplePatterns<v8i32>;
-defm : SamplePatterns<v16i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V2, IMAGE_SAMPLE_C_V4_V2,
+ IMAGE_SAMPLE_L_V4_V2, IMAGE_SAMPLE_C_L_V4_V2,
+ IMAGE_SAMPLE_B_V4_V2, IMAGE_SAMPLE_C_B_V4_V2,
+ IMAGE_SAMPLE_D_V4_V2, IMAGE_SAMPLE_C_D_V4_V2,
+ v2i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V4, IMAGE_SAMPLE_C_V4_V4,
+ IMAGE_SAMPLE_L_V4_V4, IMAGE_SAMPLE_C_L_V4_V4,
+ IMAGE_SAMPLE_B_V4_V4, IMAGE_SAMPLE_C_B_V4_V4,
+ IMAGE_SAMPLE_D_V4_V4, IMAGE_SAMPLE_C_D_V4_V4,
+ v4i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V8, IMAGE_SAMPLE_C_V4_V8,
+ IMAGE_SAMPLE_L_V4_V8, IMAGE_SAMPLE_C_L_V4_V8,
+ IMAGE_SAMPLE_B_V4_V8, IMAGE_SAMPLE_C_B_V4_V8,
+ IMAGE_SAMPLE_D_V4_V8, IMAGE_SAMPLE_C_D_V4_V8,
+ v8i32>;
+defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16,
+ IMAGE_SAMPLE_L_V4_V16, IMAGE_SAMPLE_C_L_V4_V16,
+ IMAGE_SAMPLE_B_V4_V16, IMAGE_SAMPLE_C_B_V4_V16,
+ IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16,
+ v16i32>;
/* int_SI_imageload for texture fetches consuming varying address parameters */
class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
@@ -1292,23 +1514,46 @@ class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> :
(opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
>;
-multiclass ImageLoadPatterns<ValueType addr_type> {
- def : ImageLoadPattern <int_SI_imageload, IMAGE_LOAD_MIP, addr_type>;
- def : ImageLoadArrayPattern <int_SI_imageload, IMAGE_LOAD_MIP, addr_type>;
+class ImageLoadMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
+ (name addr_type:$addr, v32i8:$rsrc, TEX_MSAA),
+ (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
+>;
+
+class ImageLoadArrayMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
+ (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY_MSAA),
+ (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
+>;
+
+multiclass ImageLoadPatterns<MIMG opcode, ValueType addr_type> {
+ def : ImageLoadPattern <int_SI_imageload, opcode, addr_type>;
+ def : ImageLoadArrayPattern <int_SI_imageload, opcode, addr_type>;
+}
+
+multiclass ImageLoadMSAAPatterns<MIMG opcode, ValueType addr_type> {
+ def : ImageLoadMSAAPattern <int_SI_imageload, opcode, addr_type>;
+ def : ImageLoadArrayMSAAPattern <int_SI_imageload, opcode, addr_type>;
}
-defm : ImageLoadPatterns<v2i32>;
-defm : ImageLoadPatterns<v4i32>;
+defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V2, v2i32>;
+defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V4, v4i32>;
+
+defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V2, v2i32>;
+defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V4, v4i32>;
/* Image resource information */
def : Pat <
(int_SI_resinfo i32:$mipid, v32i8:$rsrc, imm),
- (IMAGE_GET_RESINFO 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
+ (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
>;
def : Pat <
(int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY),
- (IMAGE_GET_RESINFO 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
+ (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
+>;
+
+def : Pat <
+ (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY_MSAA),
+ (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
>;
/********** ============================================ **********/
@@ -1379,22 +1624,30 @@ foreach Index = 0-15 in {
>;
}
-def : Vector1_Build <v1i32, i32, VReg_32>;
-def : Vector2_Build <v2i32, i32>;
-def : Vector2_Build <v2f32, f32>;
-def : Vector4_Build <v4i32, i32>;
-def : Vector4_Build <v4f32, f32>;
-def : Vector8_Build <v8i32, i32>;
-def : Vector8_Build <v8f32, f32>;
-def : Vector16_Build <v16i32, i32>;
-def : Vector16_Build <v16f32, f32>;
-
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <i32, f32, VReg_32>;
def : BitConvert <f32, i32, SReg_32>;
def : BitConvert <f32, i32, VReg_32>;
+def : BitConvert <i64, f64, VReg_64>;
+
+def : BitConvert <f64, i64, VReg_64>;
+
+def : BitConvert <v2f32, v2i32, VReg_64>;
+def : BitConvert <v2i32, v2f32, VReg_64>;
+def : BitConvert <v2i32, i64, VReg_64>;
+
+def : BitConvert <v4f32, v4i32, VReg_128>;
+def : BitConvert <v4i32, v4f32, VReg_128>;
+def : BitConvert <v4i32, i128, VReg_128>;
+def : BitConvert <i128, v4i32, VReg_128>;
+
+def : BitConvert <v8i32, v32i8, SReg_256>;
+def : BitConvert <v32i8, v8i32, SReg_256>;
+def : BitConvert <v8i32, v32i8, VReg_256>;
+def : BitConvert <v32i8, v8i32, VReg_256>;
+
/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/
@@ -1422,6 +1675,16 @@ def : Pat <
/********** ================== **********/
def : Pat <
+ (SGPRImm<(i32 imm)>:$imm),
+ (S_MOV_B32 imm:$imm)
+>;
+
+def : Pat <
+ (SGPRImm<(f32 fpimm)>:$imm),
+ (S_MOV_B32 fpimm:$imm)
+>;
+
+def : Pat <
(i32 imm:$imm),
(V_MOV_B32_e32 imm:$imm)
>;
@@ -1449,6 +1712,13 @@ def : Pat <
(S_MOV_B32 (i32 (HI32 imm:$imm))), sub1)
>;
+def : Pat <
+ (f64 fpimm:$imm),
+ (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (V_MOV_B32_e32 (f32 (LO32f fpimm:$imm))), sub0),
+ (V_MOV_B32_e32 (f32 (HI32f fpimm:$imm))), sub1)
+>;
+
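The pattern above materializes a 64-bit FP immediate as two 32-bit
move-immediates, one per sub-register. A small C++ sketch of the split
(illustrative only):

  #include <cstdint>
  #include <cstring>
  void splitF64Imm(double v, uint32_t &lo, uint32_t &hi) {
    uint64_t bits;
    std::memcpy(&bits, &v, sizeof bits);
    lo = uint32_t(bits);        // V_MOV_B32 into sub0 (LO32f)
    hi = uint32_t(bits >> 32);  // V_MOV_B32 into sub1 (HI32f)
  }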
/********** ===================== **********/
/********** Interpolation Patterns **********/
/********** ===================== **********/
@@ -1483,6 +1753,11 @@ def : Pat<
(V_MUL_F32_e32 $src0, (V_RCP_F32_e32 $src1))
>;
+def : Pat<
+ (fdiv f64:$src0, f64:$src1),
+ (V_MUL_F64 $src0, (V_RCP_F64_e32 $src1), (i64 0))
+>;
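Like the f32 case above it, f64 division lowers to a multiply by a hardware
reciprocal approximation, so it is fast but not IEEE-exact. A sketch:

  // x / y ~= x * rcp(y); the division below stands in for V_RCP_F64_e32.
  double fdivFast(double x, double y) {
    double rcp = 1.0 / y; // V_RCP_F64_e32 (approximate)
    return x * rcp;       // V_MUL_F64
  }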
+
def : Pat <
(fcos f32:$src0),
(V_COS_F32_e32 (V_MUL_F32_e32 $src0, (V_MOV_B32_e32 CONST.TWO_PI_INV)))
@@ -1521,20 +1796,20 @@ def : Pat <
// 1. Offset as 8bit DWORD immediate
def : Pat <
- (int_SI_load_const v16i8:$sbase, IMM8bitDWORD:$offset),
+ (SIload_constant i128:$sbase, IMM8bitDWORD:$offset),
(S_BUFFER_LOAD_DWORD_IMM $sbase, IMM8bitDWORD:$offset)
>;
// 2. Offset loaded in an 32bit SGPR
def : Pat <
- (int_SI_load_const v16i8:$sbase, imm:$offset),
+ (SIload_constant i128:$sbase, imm:$offset),
(S_BUFFER_LOAD_DWORD_SGPR $sbase, (S_MOV_B32 imm:$offset))
>;
// 3. Offset in an 32Bit VGPR
def : Pat <
- (int_SI_load_const v16i8:$sbase, i32:$voff),
- (BUFFER_LOAD_DWORD 0, 1, 0, 0, 0, 0, $voff, $sbase, 0, 0, 0)
+ (SIload_constant i128:$sbase, i32:$voff),
+ (BUFFER_LOAD_DWORD_OFFEN $sbase, $voff)
>;
// The multiplication scales from [0,1] to the unsigned integer range
@@ -1545,6 +1820,12 @@ def : Pat <
(V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;
+def : Pat <
+ (int_SI_tid),
+ (V_MBCNT_HI_U32_B32_e32 0xffffffff,
+ (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0, 0, 0, 0, 0))
+>;
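The int_SI_tid lowering relies on V_MBCNT_LO/HI counting the set bits of a
mask in lanes strictly below the current lane; with an all-ones mask that
count is exactly the lane's thread id. A hedged sketch of the semantics
(a GCC/Clang builtin is used for brevity):

  #include <cstdint>
  uint32_t tidForLane(uint32_t lane) {             // lane in [0, 63]
    uint64_t below = (lane == 0) ? 0 : (~0ull >> (64 - lane));
    return uint32_t(__builtin_popcountll(below));  // == lane
  }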
+
/********** ================== **********/
/********** VOP3 Patterns **********/
/********** ================== **********/
@@ -1554,6 +1835,40 @@ def : Pat <
(V_MAD_F32 $src0, $src1, $src2)
>;
+/********** ======================= **********/
+/********** Load/Store Patterns **********/
+/********** ======================= **********/
+
+class DSReadPat <DS inst, ValueType vt, PatFrag frag> : Pat <
+ (frag i32:$src0),
+ (vt (inst 0, $src0, $src0, $src0, 0, 0))
+>;
+
+def : DSReadPat <DS_READ_I8, i32, sextloadi8_local>;
+def : DSReadPat <DS_READ_U8, i32, az_extloadi8_local>;
+def : DSReadPat <DS_READ_I16, i32, sextloadi16_local>;
+def : DSReadPat <DS_READ_U16, i32, az_extloadi16_local>;
+def : DSReadPat <DS_READ_B32, i32, local_load>;
+
+class DSWritePat <DS inst, ValueType vt, PatFrag frag> : Pat <
+ (frag i32:$src1, i32:$src0),
+ (inst 0, $src0, $src1, $src1, 0, 0)
+>;
+
+def : DSWritePat <DS_WRITE_B8, i32, truncstorei8_local>;
+def : DSWritePat <DS_WRITE_B16, i32, truncstorei16_local>;
+def : DSWritePat <DS_WRITE_B32, i32, local_store>;
+
+def : Pat <(atomic_load_add_local i32:$ptr, i32:$val),
+ (DS_ADD_U32_RTN 0, $ptr, $val, 0, 0)>;
+
+def : Pat <(atomic_load_sub_local i32:$ptr, i32:$val),
+ (DS_SUB_U32_RTN 0, $ptr, $val, 0, 0)>;
+
/********** ================== **********/
/********** SMRD Patterns **********/
/********** ================== **********/
@@ -1581,8 +1896,100 @@ multiclass SMRD_Pattern <SMRD Instr_IMM, SMRD Instr_SGPR, ValueType vt> {
defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, f32>;
defm : SMRD_Pattern <S_LOAD_DWORD_IMM, S_LOAD_DWORD_SGPR, i32>;
-defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v16i8>;
+defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, i64>;
+defm : SMRD_Pattern <S_LOAD_DWORDX2_IMM, S_LOAD_DWORDX2_SGPR, v2i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, i128>;
+defm : SMRD_Pattern <S_LOAD_DWORDX4_IMM, S_LOAD_DWORDX4_SGPR, v4i32>;
defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v32i8>;
+defm : SMRD_Pattern <S_LOAD_DWORDX8_IMM, S_LOAD_DWORDX8_SGPR, v8i32>;
+defm : SMRD_Pattern <S_LOAD_DWORDX16_IMM, S_LOAD_DWORDX16_SGPR, v16i32>;
+
+//===----------------------------------------------------------------------===//
+// MUBUF Patterns
+//===----------------------------------------------------------------------===//
+
+multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
+ PatFrag global_ld, PatFrag constant_ld> {
+ def : Pat <
+ (vt (global_ld (add i64:$ptr, (i64 IMM12bit:$offset)))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, (as_i16imm $offset))
+ >;
+
+ def : Pat <
+ (vt (global_ld i64:$ptr)),
+ (Instr_ADDR64 (SI_ADDR64_RSRC (i64 0)), $ptr, 0)
+ >;
+
+ def : Pat <
+ (vt (global_ld (add i64:$ptr, i64:$offset))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+
+ def : Pat <
+ (vt (constant_ld (add i64:$ptr, i64:$offset))),
+ (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+}
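The multiclass above matches three addressing shapes: base plus a 12-bit
immediate (the immediate goes in the instruction's offset field), a bare base
pointer (offset 0), and base plus a register offset (the base is folded into
the rsrc and the register drives vaddr). A rough C++ sketch of that
selection, with assumed field names:

  #include <cstdint>
  struct Addr64Sel { uint64_t rsrcBase, vaddr; uint16_t imm; };
  Addr64Sel selectGlobalLoad(uint64_t ptr, uint64_t off, bool offFitsImm12) {
    if (offFitsImm12)
      return { 0, ptr, uint16_t(off) };  // imm cases: vaddr is the pointer
    return { ptr, off, 0 };              // reg case: pointer folded into rsrc
  }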
+
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32,
+ sextloadi8_global, sextloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32,
+ az_extloadi8_global, az_extloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32,
+ sextloadi16_global, sextloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32,
+ az_extloadi16_global, az_extloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32,
+ global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64,
+ global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, i64,
+ az_extloadi32_global, az_extloadi32_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32,
+ global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32,
+ global_load, constant_load>;
+
+multiclass MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> {
+
+ def : Pat <
+ (st vt:$value, i64:$ptr),
+ (Instr $value, (SI_ADDR64_RSRC (i64 0)), $ptr, 0)
+ >;
+
+ def : Pat <
+ (st vt:$value, (add i64:$ptr, i64:$offset)),
+ (Instr $value, (SI_ADDR64_RSRC $ptr), $offset, 0)
+ >;
+}
+
+defm : MUBUFStore_Pattern <BUFFER_STORE_BYTE, i32, truncstorei8_global>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_SHORT, i32, truncstorei16_global>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORD, i32, global_store>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, i64, global_store>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2, v2i32, global_store>;
+defm : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4, v4i32, global_store>;
+
+//===----------------------------------------------------------------------===//
+// MTBUF Patterns
+//===----------------------------------------------------------------------===//
+
+// TBUFFER_STORE_FORMAT_*, addr64=0
+class MTBUF_StoreResource <ValueType vt, int num_channels, MTBUF opcode> : Pat<
+ (SItbuffer_store i128:$rsrc, vt:$vdata, num_channels, i32:$vaddr,
+ i32:$soffset, imm:$inst_offset, imm:$dfmt,
+ imm:$nfmt, imm:$offen, imm:$idxen,
+ imm:$glc, imm:$slc, imm:$tfe),
+ (opcode
+ $vdata, (as_i16imm $inst_offset), (as_i1imm $offen), (as_i1imm $idxen),
+ (as_i1imm $glc), 0, (as_i8imm $dfmt), (as_i8imm $nfmt), $vaddr, $rsrc,
+ (as_i1imm $slc), (as_i1imm $tfe), $soffset)
+>;
+
+def : MTBUF_StoreResource <i32, 1, TBUFFER_STORE_FORMAT_X>;
+def : MTBUF_StoreResource <v2i32, 2, TBUFFER_STORE_FORMAT_XY>;
+def : MTBUF_StoreResource <v4i32, 3, TBUFFER_STORE_FORMAT_XYZ>;
+def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;
/********** ====================== **********/
/********** Indirect addressing **********/
@@ -1592,25 +1999,25 @@ multiclass SI_INDIRECT_Pattern <ValueType vt, SI_INDIRECT_DST IndDst> {
// 1. Extract with offset
def : Pat<
- (vector_extract vt:$vec, (i64 (zext (add i32:$idx, imm:$off)))),
+ (vector_extract vt:$vec, (add i32:$idx, imm:$off)),
(f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, imm:$off))
>;
// 2. Extract without offset
def : Pat<
- (vector_extract vt:$vec, (i64 (zext i32:$idx))),
+ (vector_extract vt:$vec, i32:$idx),
(f32 (SI_INDIRECT_SRC (IMPLICIT_DEF), $vec, $idx, 0))
>;
// 3. Insert with offset
def : Pat<
- (vector_insert vt:$vec, f32:$val, (i64 (zext (add i32:$idx, imm:$off)))),
+ (vector_insert vt:$vec, f32:$val, (add i32:$idx, imm:$off)),
(IndDst (IMPLICIT_DEF), $vec, $idx, imm:$off, $val)
>;
// 4. Insert without offset
def : Pat<
- (vector_insert vt:$vec, f32:$val, (i64 (zext i32:$idx))),
+ (vector_insert vt:$vec, f32:$val, i32:$idx),
(IndDst (IMPLICIT_DEF), $vec, $idx, 0, $val)
>;
}
@@ -1634,6 +2041,37 @@ def : Pat<
(V_CMP_U_F32_e64 $src0, $src1)
>;
+//===----------------------------------------------------------------------===//
+// Miscellaneous Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+ (i64 (trunc i128:$x)),
+ (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+ (i32 (EXTRACT_SUBREG $x, sub0)), sub0),
+ (i32 (EXTRACT_SUBREG $x, sub1)), sub1)
+>;
+
+def : Pat <
+ (i32 (trunc i64:$a)),
+ (EXTRACT_SUBREG $a, sub0)
+>;
+
+// V_ADD_I32_e32/S_ADD_I32 produce a carry out in VCC/SCC respectively. For
+// the vector case, the sgpr-copies pass will rewrite this to the vector form.
+def : Pat <
+ (i32 (addc i32:$src0, i32:$src1)),
+ (S_ADD_I32 $src0, $src1)
+>;
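The addc node here is a plain 32-bit add whose carry-out is written to a
condition register (SCC for the scalar form, VCC for the vector one). A
sketch of the semantics:

  #include <cstdint>
  uint32_t addCarryOut(uint32_t a, uint32_t b, bool &scc) {
    uint32_t sum = a + b;
    scc = sum < a;  // unsigned wraparound => carry out
    return sum;
  }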
+
+def : Pat <
+ (or i64:$a, i64:$b),
+ (INSERT_SUBREG
+ (INSERT_SUBREG (IMPLICIT_DEF),
+ (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub0), (EXTRACT_SUBREG $b, sub0)), sub0),
+ (V_OR_B32_e32 (EXTRACT_SUBREG $a, sub1), (EXTRACT_SUBREG $b, sub1)), sub1)
+>;
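The 64-bit OR is decomposed into one V_OR_B32 per 32-bit half, exactly as the
INSERT_SUBREG/EXTRACT_SUBREG chain spells out. Equivalent C++:

  #include <cstdint>
  uint64_t or64(uint64_t a, uint64_t b) {
    uint32_t lo = uint32_t(a) | uint32_t(b);              // sub0
    uint32_t hi = uint32_t(a >> 32) | uint32_t(b >> 32);  // sub1
    return (uint64_t(hi) << 32) | lo;
  }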
+
//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//
diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/R600/SIIntrinsics.td
index 224cd2f..7fcc964 100644
--- a/lib/Target/R600/SIIntrinsics.td
+++ b/lib/Target/R600/SIIntrinsics.td
@@ -14,15 +14,35 @@
let TargetPrefix = "SI", isTarget = 1 in {
+ def int_SI_tid : Intrinsic <[llvm_i32_ty], [], [IntrNoMem]>;
def int_SI_packf16 : Intrinsic <[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>;
- def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_v16i8_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]> ;
+ def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_SI_vs_load_input : Intrinsic <[llvm_v4f32_ty], [llvm_anyint_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]> ;
- class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ // Fully-flexible TBUFFER_STORE_FORMAT_* except for the ADDR64 bit, which is not exposed
+ def int_SI_tbuffer_store : Intrinsic <
+ [],
+ [llvm_anyint_ty, // rsrc(SGPR)
+ llvm_anyint_ty, // vdata(VGPR), overloaded for types i32, v2i32, v4i32
+ llvm_i32_ty, // num_channels(imm), selects opcode suffix: 1=X, 2=XY, 3=XYZ, 4=XYZW
+ llvm_i32_ty, // vaddr(VGPR)
+ llvm_i32_ty, // soffset(SGPR)
+ llvm_i32_ty, // inst_offset(imm)
+ llvm_i32_ty, // dfmt(imm)
+ llvm_i32_ty, // nfmt(imm)
+ llvm_i32_ty, // offen(imm)
+ llvm_i32_ty, // idxen(imm)
+ llvm_i32_ty, // glc(imm)
+ llvm_i32_ty, // slc(imm)
+ llvm_i32_ty], // tfe(imm)
+ []>;
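A small sketch of how num_channels is meant to select the opcode suffix,
mirroring the operand comment above (the helper itself is illustrative, not
part of the patch):

  const char *tbufferSuffix(unsigned NumChannels) {
    switch (NumChannels) {
    case 1: return "X";
    case 2: return "XY";
    case 3: return "XYZ";
    case 4: return "XYZW";
    default: return nullptr;  // invalid channel count
    }
  }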
+
+ class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
def int_SI_sample : Sample;
def int_SI_sampleb : Sample;
+ def int_SI_sampled : Sample;
def int_SI_samplel : Sample;
def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/R600/SILowerControlFlow.cpp
index 2b60eb9..958763d 100644
--- a/lib/Target/R600/SILowerControlFlow.cpp
+++ b/lib/Target/R600/SILowerControlFlow.cpp
@@ -91,8 +91,7 @@ private:
public:
SILowerControlFlowPass(TargetMachine &tm) :
- MachineFunctionPass(ID), TRI(tm.getRegisterInfo()),
- TII(tm.getInstrInfo()) { }
+ MachineFunctionPass(ID), TRI(0), TII(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -378,10 +377,13 @@ void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Vec = MI.getOperand(2).getReg();
unsigned Off = MI.getOperand(4).getImm();
+ unsigned SubReg = TRI->getSubReg(Vec, AMDGPU::sub0);
+ if (!SubReg)
+ SubReg = Vec;
- MachineInstr *MovRel =
+ MachineInstr *MovRel =
BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
- .addReg(TRI->getSubReg(Vec, AMDGPU::sub0) + Off)
+ .addReg(SubReg + Off)
.addReg(AMDGPU::M0, RegState::Implicit)
.addReg(Vec, RegState::Implicit);
@@ -396,10 +398,13 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Off = MI.getOperand(4).getImm();
unsigned Val = MI.getOperand(5).getReg();
+ unsigned SubReg = TRI->getSubReg(Dst, AMDGPU::sub0);
+ if (!SubReg)
+ SubReg = Dst;
MachineInstr *MovRel =
BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
- .addReg(TRI->getSubReg(Dst, AMDGPU::sub0) + Off, RegState::Define)
+ .addReg(SubReg + Off, RegState::Define)
.addReg(Val)
.addReg(AMDGPU::M0, RegState::Implicit)
.addReg(Dst, RegState::Implicit);
@@ -408,8 +413,12 @@ void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
}
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
bool HaveKill = false;
+ bool NeedM0 = false;
bool NeedWQM = false;
unsigned Depth = 0;
@@ -474,6 +483,7 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
IndirectSrc(MI);
break;
+ case AMDGPU::SI_INDIRECT_DST_V1:
case AMDGPU::SI_INDIRECT_DST_V2:
case AMDGPU::SI_INDIRECT_DST_V4:
case AMDGPU::SI_INDIRECT_DST_V8:
@@ -481,6 +491,14 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
IndirectDst(MI);
break;
+ case AMDGPU::DS_READ_B32:
+ NeedWQM = true;
+ // Fall through
+ case AMDGPU::DS_WRITE_B32:
+ case AMDGPU::DS_ADD_U32_RTN:
+ NeedM0 = true;
+ break;
+
case AMDGPU::V_INTERP_P1_F32:
case AMDGPU::V_INTERP_P2_F32:
case AMDGPU::V_INTERP_MOV_F32:
@@ -491,7 +509,15 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
}
}
- if (NeedWQM) {
+ if (NeedM0) {
+ MachineBasicBlock &MBB = MF.front();
+ // Initialize M0 to a value that won't cause LDS access to be discarded
+ // due to offset clamping
+ BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_MOV_B32),
+ AMDGPU::M0).addImm(0xffffffff);
+ }
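The assumption encoded here is that LDS addressing is clamped against M0, so
seeding M0 with all ones makes the clamp a no-op. A hedged sketch of that
rule (hardware behavior as assumed by this comment, not verified here):

  #include <cstdint>
  bool ldsAccessKept(uint32_t addr, uint32_t m0) {
    return addr <= m0;  // with m0 == 0xffffffff every address passes
  }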
+
+ if (NeedWQM && MFI->ShaderType != ShaderType::COMPUTE) {
MachineBasicBlock &MBB = MF.front();
BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
AMDGPU::EXEC).addReg(AMDGPU::EXEC);
diff --git a/lib/Target/R600/SIMachineFunctionInfo.cpp b/lib/Target/R600/SIMachineFunctionInfo.cpp
index ee0e307..071f9fa 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.cpp
+++ b/lib/Target/R600/SIMachineFunctionInfo.cpp
@@ -13,6 +13,10 @@
using namespace llvm;
+
+// Pin the vtable to this file.
+void SIMachineFunctionInfo::anchor() {}
+
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
: AMDGPUMachineFunction(MF),
PSInputAddr(0) { }
diff --git a/lib/Target/R600/SIMachineFunctionInfo.h b/lib/Target/R600/SIMachineFunctionInfo.h
index 6da9f7f..2f1961c 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.h
+++ b/lib/Target/R600/SIMachineFunctionInfo.h
@@ -22,6 +22,7 @@ namespace llvm {
/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
/// tells the hardware which interpolation parameters to load.
class SIMachineFunctionInfo : public AMDGPUMachineFunction {
+ virtual void anchor();
public:
SIMachineFunctionInfo(const MachineFunction &MF);
unsigned PSInputAddr;
diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp
index 99278ae..ed0bbaf 100644
--- a/lib/Target/R600/SIRegisterInfo.cpp
+++ b/lib/Target/R600/SIRegisterInfo.cpp
@@ -15,18 +15,21 @@
#include "SIRegisterInfo.h"
#include "AMDGPUTargetMachine.h"
+#include "SIInstrInfo.h"
using namespace llvm;
-SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm,
- const TargetInstrInfo &tii)
-: AMDGPURegisterInfo(tm, tii),
- TM(tm),
- TII(tii)
+SIRegisterInfo::SIRegisterInfo(AMDGPUTargetMachine &tm)
+: AMDGPURegisterInfo(tm),
+ TM(tm)
{ }
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
+ Reserved.set(AMDGPU::EXEC);
+ Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(TM.getInstrInfo());
+ TII->reserveIndirectRegisters(Reserved, MF);
return Reserved;
}
@@ -51,3 +54,78 @@ const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
case MVT::i32: return &AMDGPU::VReg_32RegClass;
}
}
+
+unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
+ return getEncodingValue(Reg);
+}
+
+const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
+ assert(!TargetRegisterInfo::isVirtualRegister(Reg));
+
+ const TargetRegisterClass *BaseClasses[] = {
+ &AMDGPU::VReg_32RegClass,
+ &AMDGPU::SReg_32RegClass,
+ &AMDGPU::VReg_64RegClass,
+ &AMDGPU::SReg_64RegClass,
+ &AMDGPU::SReg_128RegClass,
+ &AMDGPU::SReg_256RegClass
+ };
+
+ for (unsigned i = 0, e = sizeof(BaseClasses) /
+ sizeof(const TargetRegisterClass*); i != e; ++i) {
+ if (BaseClasses[i]->contains(Reg)) {
+ return BaseClasses[i];
+ }
+ }
+ return NULL;
+}
+
+bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const {
+ if (!RC) {
+ return false;
+ }
+ return !hasVGPRs(RC);
+}
+
+bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
+ return getCommonSubClass(&AMDGPU::VReg_32RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
+ getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
+}
+
+const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
+ const TargetRegisterClass *SRC) const {
+ if (hasVGPRs(SRC)) {
+ return SRC;
+ } else if (SRC == &AMDGPU::SCCRegRegClass) {
+ return &AMDGPU::VCCRegRegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
+ return &AMDGPU::VReg_32RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
+ return &AMDGPU::VReg_64RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
+ return &AMDGPU::VReg_128RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
+ return &AMDGPU::VReg_256RegClass;
+ } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
+ return &AMDGPU::VReg_512RegClass;
+ }
+ return NULL;
+}
+
+const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
+ const TargetRegisterClass *RC, unsigned SubIdx) const {
+ if (SubIdx == AMDGPU::NoSubRegister)
+ return RC;
+
+ // If this register has a sub-register, we can safely assume it is a 32-bit
+  // register, because all of SI's sub-registers are 32-bit.
+ if (isSGPRClass(RC)) {
+ return &AMDGPU::SGPR_32RegClass;
+ } else {
+ return &AMDGPU::VGPR_32RegClass;
+ }
+}
diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/R600/SIRegisterInfo.h
index caec228..8148f7f 100644
--- a/lib/Target/R600/SIRegisterInfo.h
+++ b/lib/Target/R600/SIRegisterInfo.h
@@ -21,13 +21,11 @@
namespace llvm {
class AMDGPUTargetMachine;
-class TargetInstrInfo;
struct SIRegisterInfo : public AMDGPURegisterInfo {
AMDGPUTargetMachine &TM;
- const TargetInstrInfo &TII;
- SIRegisterInfo(AMDGPUTargetMachine &tm, const TargetInstrInfo &tii);
+ SIRegisterInfo(AMDGPUTargetMachine &tm);
virtual BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -43,6 +41,28 @@ struct SIRegisterInfo : public AMDGPURegisterInfo {
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
virtual const TargetRegisterClass * getCFGStructurizerRegClass(MVT VT) const;
+
+ virtual unsigned getHWRegIndex(unsigned Reg) const;
+
+ /// \brief Return the 'base' register class for this register.
+  /// e.g. SGPR0 => SReg_32, VGPR0 => VReg_32, SGPR0_SGPR1 => SReg_64, etc.
+ const TargetRegisterClass *getPhysRegClass(unsigned Reg) const;
+
+ /// \returns true if this class contains only SGPR registers
+ bool isSGPRClass(const TargetRegisterClass *RC) const;
+
+ /// \returns true if this class contains VGPR registers.
+ bool hasVGPRs(const TargetRegisterClass *RC) const;
+
+ /// \returns A VGPR reg class with the same width as \p SRC
+ const TargetRegisterClass *getEquivalentVGPRClass(
+ const TargetRegisterClass *SRC) const;
+
+ /// \returns The register class that is used for a sub-register of \p RC for
+ /// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will
+ /// be returned.
+ const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC,
+ unsigned SubIdx) const;
};
} // End namespace llvm
diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/R600/SIRegisterInfo.td
index 244d4c00..49bdbc9 100644
--- a/lib/Target/R600/SIRegisterInfo.td
+++ b/lib/Target/R600/SIRegisterInfo.td
@@ -43,7 +43,7 @@ def SGPR_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add (sequence "SGPR%u", 0, 101))>;
// SGPR 64-bit registers
-def SGPR_64 : RegisterTuples<[sub0, sub1],
+def SGPR_64Regs : RegisterTuples<[sub0, sub1],
[(add (decimate (trunc SGPR_32, 101), 2)),
(add (decimate (shl SGPR_32, 1), 2))]>;
@@ -153,15 +153,17 @@ def SReg_32 : RegisterClass<"AMDGPU", [f32, i32], 32,
(add SGPR_32, M0Reg)
>;
-def SReg_64 : RegisterClass<"AMDGPU", [i64, i1], 64,
- (add SGPR_64, VCCReg, EXECReg)
+def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64], 64, (add SGPR_64Regs)>;
+
+def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, i1], 64,
+ (add SGPR_64Regs, VCCReg, EXECReg)
>;
-def SReg_128 : RegisterClass<"AMDGPU", [v16i8, i128], 128, (add SGPR_128)>;
+def SReg_128 : RegisterClass<"AMDGPU", [i128, v4i32], 128, (add SGPR_128)>;
-def SReg_256 : RegisterClass<"AMDGPU", [v32i8], 256, (add SGPR_256)>;
+def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add SGPR_256)>;
-def SReg_512 : RegisterClass<"AMDGPU", [v64i8], 512, (add SGPR_512)>;
+def SReg_512 : RegisterClass<"AMDGPU", [v64i8, v16i32], 512, (add SGPR_512)>;
// Register class for all vector registers (VGPRs + Interpolation Registers)
def VReg_32 : RegisterClass<"AMDGPU", [i32, f32, v1i32], 32, (add VGPR_32)>;
@@ -172,9 +174,9 @@ def VReg_96 : RegisterClass<"AMDGPU", [untyped], 96, (add VGPR_96)> {
let Size = 96;
}
-def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32], 128, (add VGPR_128)>;
+def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, i128], 128, (add VGPR_128)>;
-def VReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32], 256, (add VGPR_256)>;
+def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 256, (add VGPR_256)>;
def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 512, (add VGPR_512)>;
diff --git a/lib/Target/R600/SITypeRewriter.cpp b/lib/Target/R600/SITypeRewriter.cpp
new file mode 100644
index 0000000..f194d8b
--- /dev/null
+++ b/lib/Target/R600/SITypeRewriter.cpp
@@ -0,0 +1,162 @@
+//===-- SITypeRewriter.cpp - Remove unwanted types ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass performs the following type substitutions on all
+/// non-compute shaders:
+///
+/// v16i8 => i128
+/// - v16i8 is used for constant memory resource descriptors. This type is
+/// legal for some compute APIs, and we don't want to declare it as legal
+/// in the backend, because we want the legalizer to expand all v16i8
+/// operations.
+/// v1* => *
+/// - Having v1* types complicates the legalizer, and we can easily replace
+///   them with the element type.
+//===----------------------------------------------------------------------===//
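Illustrative pseudo-IR (not from the patch) showing the shape of the
v16i8 => i128 rewrite that visitLoadInst performs below:

  // before: %desc = load <16 x i8> addrspace(2)* %ptr
  // after:  %cast = bitcast <16 x i8> addrspace(2)* %ptr to i128 addrspace(2)*
  //         %val  = load i128 addrspace(2)* %cast
  //         %desc = bitcast i128 %val to <16 x i8>  // rewired at the old uses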
+
+#include "AMDGPU.h"
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/InstVisitor.h"
+
+using namespace llvm;
+
+namespace {
+
+class SITypeRewriter : public FunctionPass,
+ public InstVisitor<SITypeRewriter> {
+
+ static char ID;
+ Module *Mod;
+ Type *v16i8;
+ Type *i128;
+
+public:
+ SITypeRewriter() : FunctionPass(ID) { }
+ virtual bool doInitialization(Module &M);
+ virtual bool runOnFunction(Function &F);
+ virtual const char *getPassName() const {
+ return "SI Type Rewriter";
+ }
+ void visitLoadInst(LoadInst &I);
+ void visitCallInst(CallInst &I);
+ void visitBitCast(BitCastInst &I);
+};
+
+} // End anonymous namespace
+
+char SITypeRewriter::ID = 0;
+
+bool SITypeRewriter::doInitialization(Module &M) {
+ Mod = &M;
+ v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16);
+ i128 = Type::getIntNTy(M.getContext(), 128);
+ return false;
+}
+
+bool SITypeRewriter::runOnFunction(Function &F) {
+ AttributeSet Set = F.getAttributes();
+ Attribute A = Set.getAttribute(AttributeSet::FunctionIndex, "ShaderType");
+
+ unsigned ShaderType = ShaderType::COMPUTE;
+ if (A.isStringAttribute()) {
+ StringRef Str = A.getValueAsString();
+ Str.getAsInteger(0, ShaderType);
+ }
+  if (ShaderType != ShaderType::COMPUTE) {
+    visit(F);
+  }
+
+ return false;
+}
+
+void SITypeRewriter::visitLoadInst(LoadInst &I) {
+ Value *Ptr = I.getPointerOperand();
+ Type *PtrTy = Ptr->getType();
+ Type *ElemTy = PtrTy->getPointerElementType();
+ IRBuilder<> Builder(&I);
+ if (ElemTy == v16i8) {
+    Value *BitCast = Builder.CreateBitCast(
+        Ptr, Type::getIntNPtrTy(I.getContext(), 128, 2));
+ LoadInst *Load = Builder.CreateLoad(BitCast);
+ SmallVector <std::pair<unsigned, MDNode*>, 8> MD;
+ I.getAllMetadataOtherThanDebugLoc(MD);
+ for (unsigned i = 0, e = MD.size(); i != e; ++i) {
+ Load->setMetadata(MD[i].first, MD[i].second);
+ }
+ Value *BitCastLoad = Builder.CreateBitCast(Load, I.getType());
+ I.replaceAllUsesWith(BitCastLoad);
+ I.eraseFromParent();
+ }
+}
+
+void SITypeRewriter::visitCallInst(CallInst &I) {
+ IRBuilder<> Builder(&I);
+ SmallVector <Value*, 8> Args;
+ SmallVector <Type*, 8> Types;
+ bool NeedToReplace = false;
+ Function *F = I.getCalledFunction();
+ std::string Name = F->getName().str();
+ for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+ Value *Arg = I.getArgOperand(i);
+ if (Arg->getType() == v16i8) {
+ Args.push_back(Builder.CreateBitCast(Arg, i128));
+ Types.push_back(i128);
+ NeedToReplace = true;
+ Name = Name + ".i128";
+ } else if (Arg->getType()->isVectorTy() &&
+ Arg->getType()->getVectorNumElements() == 1 &&
+ Arg->getType()->getVectorElementType() ==
+ Type::getInt32Ty(I.getContext())){
+ Type *ElementTy = Arg->getType()->getVectorElementType();
+ std::string TypeName = "i32";
+ InsertElementInst *Def = dyn_cast<InsertElementInst>(Arg);
+ assert(Def);
+ Args.push_back(Def->getOperand(1));
+ Types.push_back(ElementTy);
+ std::string VecTypeName = "v1" + TypeName;
+ Name = Name.replace(Name.find(VecTypeName), VecTypeName.length(), TypeName);
+ NeedToReplace = true;
+ } else {
+ Args.push_back(Arg);
+ Types.push_back(Arg->getType());
+ }
+ }
+
+ if (!NeedToReplace) {
+ return;
+ }
+ Function *NewF = Mod->getFunction(Name);
+ if (!NewF) {
+    NewF = Function::Create(FunctionType::get(F->getReturnType(), Types, false),
+                            GlobalValue::ExternalLinkage, Name, Mod);
+ NewF->setAttributes(F->getAttributes());
+ }
+ I.replaceAllUsesWith(Builder.CreateCall(NewF, Args));
+ I.eraseFromParent();
+}
+
+void SITypeRewriter::visitBitCast(BitCastInst &I) {
+ IRBuilder<> Builder(&I);
+ if (I.getDestTy() != i128) {
+ return;
+ }
+
+ if (BitCastInst *Op = dyn_cast<BitCastInst>(I.getOperand(0))) {
+ if (Op->getSrcTy() == i128) {
+ I.replaceAllUsesWith(Op->getOperand(0));
+ I.eraseFromParent();
+ }
+ }
+}
+
+FunctionPass *llvm::createSITypeRewriter() {
+ return new SITypeRewriter();
+}
diff --git a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
index 46b1f18..f437564 100644
--- a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
+++ b/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;