Diffstat (limited to 'contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp')
-rw-r--r--  contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp | 137
1 file changed, 80 insertions(+), 57 deletions(-)
diff --git a/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp b/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
index 1cb9d21..ffb92aa 100644
--- a/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -13,6 +13,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
@@ -22,30 +23,19 @@ using namespace llvm;
namespace {
-class AMDGPUMCObjectWriter : public MCObjectWriter {
-public:
- AMDGPUMCObjectWriter(raw_pwrite_stream &OS) : MCObjectWriter(OS, true) {}
- void executePostLayoutBinding(MCAssembler &Asm,
- const MCAsmLayout &Layout) override {
- //XXX: Implement if necessary.
- }
- void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, bool &IsPCRel,
- uint64_t &FixedValue) override {
- assert(!"Not implemented");
- }
-
- void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
-
-};
-
class AMDGPUAsmBackend : public MCAsmBackend {
public:
AMDGPUAsmBackend(const Target &T)
: MCAsmBackend() {}
unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
+
+ void processFixupValue(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override;
+
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value, bool IsPCRel) const override;
bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
@@ -55,7 +45,7 @@ public:
}
void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
MCInst &Res) const override {
- assert(!"Not implemented");
+ llvm_unreachable("Not implemented");
}
bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
@@ -65,15 +55,10 @@ public:
} //End anonymous namespace
-void AMDGPUMCObjectWriter::writeObject(MCAssembler &Asm,
- const MCAsmLayout &Layout) {
- for (MCAssembler::iterator I = Asm.begin(), E = Asm.end(); I != E; ++I) {
- Asm.writeSectionData(&*I, Layout);
- }
-}
-
static unsigned getFixupKindNumBytes(unsigned Kind) {
switch (Kind) {
+ case AMDGPU::fixup_si_sopp_br:
+ return 2;
case FK_SecRel_1:
case FK_Data_1:
return 1;
@@ -92,40 +77,77 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
}
}
+static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
+ MCContext *Ctx) {
+ int64_t SignedValue = static_cast<int64_t>(Value);
+
+ switch (Fixup.getKind()) {
+ case AMDGPU::fixup_si_sopp_br: {
+ int64_t BrImm = (SignedValue - 4) / 4;
+
+ if (Ctx && !isInt<16>(BrImm))
+ Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
+
+ return BrImm;
+ }
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_Data_8:
+ case FK_PCRel_4:
+ case FK_SecRel_4:
+ return Value;
+ default:
+ llvm_unreachable("unhandled fixup kind");
+ }
+}
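
As a standalone illustration (not part of the patch), the adjustment above can be exercised with plain integers. soppBranchImm below is a hypothetical helper that mirrors the fixup_si_sopp_br case, assuming the -4 accounts for the 4-byte branch instruction itself and the /4 converts the byte distance into 4-byte words:

#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the fixup_si_sopp_br adjustment above:
// subtract the 4-byte branch instruction, then scale bytes to dwords.
static int64_t soppBranchImm(int64_t ByteDistance) {
  return (ByteDistance - 4) / 4;
}

int main() {
  assert(soppBranchImm(4) == 0);    // target is the very next instruction
  assert(soppBranchImm(132) == 32); // forward branch over 128 bytes
  assert(soppBranchImm(-28) == -8); // backward branch
  return 0;
}
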
+
+void AMDGPUAsmBackend::processFixupValue(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {
+ MCValue Res;
+
+ // When we have complex expressions like: BB0_1 + (BB0_2 - 4), which are
+ // used for long branches, this function will be called with
+ // IsResolved = false and Value set to some pre-computed value. In
+ // the example above, the value would be:
+ // (BB0_1 + (BB0_2 - 4)) - CurrentOffsetFromStartOfFunction.
+ // This is not what we want; we want only the expression's value.
+ // The MC layer subtracts the current offset from the expression because
+ // the fixup is of kind FK_PCRel_4.
+ // For these scenarios, evaluateAsValue gives us the computation that we
+ // want.
+ if (!IsResolved && Fixup.getValue()->evaluateAsValue(Res, Layout) &&
+ Res.isAbsolute()) {
+ Value = Res.getConstant();
+ IsResolved = true;
+ }
+ if (IsResolved)
+ Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
+}
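
A rough numeric walk-through of the comment above, using made-up label offsets (BB0_1 = 0x100, BB0_2 = 0x40) and a made-up fixup position; it only restates the difference between the pre-computed PC-relative value and the bare expression that evaluateAsValue() recovers:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical offsets for the labels named in the comment above.
  const int64_t BB0_1 = 0x100;
  const int64_t BB0_2 = 0x40;
  const int64_t CurrentOffsetFromStartOfFunction = 0x20;

  // What the MC layer pre-computes for an FK_PCRel_4 fixup.
  int64_t PCRel = (BB0_1 + (BB0_2 - 4)) - CurrentOffsetFromStartOfFunction;
  // What evaluateAsValue() gives back: the expression alone.
  int64_t Expr = BB0_1 + (BB0_2 - 4);

  std::printf("pcrel=%lld expr=%lld\n", (long long)PCRel, (long long)Expr);
  return 0;
}
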
+
void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value,
bool IsPCRel) const {
+ if (!Value)
+ return; // Doesn't change encoding.
- switch ((unsigned)Fixup.getKind()) {
- case AMDGPU::fixup_si_sopp_br: {
- int64_t BrImm = ((int64_t)Value - 4) / 4;
- if (!isInt<16>(BrImm))
- report_fatal_error("branch size exceeds simm16");
-
- uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
- *Dst = BrImm;
- break;
- }
-
- default: {
- // FIXME: Copied from AArch64
- unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
- if (!Value)
- return; // Doesn't change encoding.
- MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
-
- // Shift the value into position.
- Value <<= Info.TargetOffset;
-
- unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
-
- // For each byte of the fragment that the fixup touches, mask in the
- // bits from the fixup value.
- for (unsigned i = 0; i != NumBytes; ++i)
- Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
- }
- }
+ MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
+ uint32_t Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the bits from
+ // the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}
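
The OR-based patching above can be sketched in isolation. The buffer below assumes a little-endian S_BRANCH encoding of 0xBF820000 with an empty simm16 field (an assumption for illustration, not taken from this patch) and a 2-byte fixup_si_sopp_br fixup at offset 0:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed little-endian s_branch encoding with a zero simm16 field.
  uint8_t Data[4] = {0x00, 0x00, 0x82, 0xBF};
  const uint64_t Value = 0x20; // already-adjusted branch immediate
  const unsigned Offset = 0, NumBytes = 2;

  // Same masking loop as applyFixup: OR each fixup byte into the fragment.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);

  for (unsigned i = 0; i != 4; ++i)
    std::printf("%02x ", Data[i]);
  std::printf("\n"); // expected: 20 00 82 bf
  return 0;
}
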
const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
@@ -171,7 +193,8 @@ public:
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- const Triple &TT, StringRef CPU) {
+ const Triple &TT, StringRef CPU,
+ const MCTargetOptions &Options) {
// Use 64-bit ELF for amdgcn
return new ELFAMDGPUAsmBackend(T, TT);
}