author      dim <dim@FreeBSD.org>    2014-11-24 17:02:24 +0000
committer   dim <dim@FreeBSD.org>    2014-11-24 17:02:24 +0000
commit      2c8643c6396b0a3db33430cf9380e70bbb9efce0 (patch)
tree        4df130b28021d86e13bf4565ef58c1c5a5e093b4 /contrib/llvm/lib/ExecutionEngine
parent      678318cd20f7db4e6c6b85d83fe00fa327b04fca (diff)
parent      e27feadae0885aa074df58ebfda2e7a7f7a7d590 (diff)
download    FreeBSD-src-2c8643c6396b0a3db33430cf9380e70bbb9efce0.zip
            FreeBSD-src-2c8643c6396b0a3db33430cf9380e70bbb9efce0.tar.gz
Merge llvm 3.5.0 release from ^/vendor/llvm/dist, resolve conflicts, and
preserve our customizations, where necessary.
Diffstat (limited to 'contrib/llvm/lib/ExecutionEngine')
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h                          |   4
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp                            | 251
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp                    |  43
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp       |  21
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h                  |  14
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp                      |  71
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp              |   6
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp                    |   7
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h                      |  24
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp                                    |  85
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.h                                      |  49
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp                             | 124
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp                       | 144
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp                                | 125
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h                                  |  96
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp                 |  27
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp       |  23
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp                |   7
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp                        |   1
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp                   |  51
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h                |  62
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp                    | 623
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp             | 656
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp                 | 993
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h                   | 133
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h                  | 164
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp               | 517
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h                 | 194
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h  | 255
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h      | 154
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h     | 315
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h   | 132
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp                               |   8
33 files changed, 3594 insertions, 1785 deletions
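
Note (not part of the commit): the bulk of this diff is the LLVM 3.5 API modernization — 0/NULL replaced by nullptr, OwningPtr replaced by std::unique_ptr, explicit override specifiers, the MutexGuard tokens dropped from the ExecutionEngineState accessors — plus new plumbing for per-engine IR verification via EngineBuilder::InitEngine() and ExecutionEngine::setVerifyModules(). For orientation only, the sketch below shows how client code might drive the 3.5.0 EngineBuilder API that this diff tracks; the module argument, function name, and error handling are placeholders, and the setter names are assumed from the LLVM 3.5 headers rather than taken from this diff.

// Sketch of client code against the LLVM 3.5.0 ExecutionEngine API touched by
// this merge.  Only the EngineBuilder calls mirror what the diff adds (notably
// the VerifyModules plumbing); everything else is a stand-in.
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"          // forces MCJIT to be linked in
#include "llvm/IR/Module.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

llvm::ExecutionEngine *buildEngine(llvm::Module *M) {
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();

  std::string Err;
  llvm::ExecutionEngine *EE =
      llvm::EngineBuilder(M)                     // 3.5 still takes a raw Module*
          .setEngineKind(llvm::EngineKind::JIT)  // create() falls back as in the diff
          .setUseMCJIT(true)                     // routes through MCJITCtor
          .setErrorStr(&Err)
          .setVerifyModules(true)                // new knob; per InitEngine() it defaults
                                                 // to on only in debug (!NDEBUG) builds
          .create();
  if (!EE)
    llvm::errs() << "Failed to create ExecutionEngine: " << Err << "\n";
  else
    EE->finalizeObject();                        // MCJIT needs this before code runs
  return EE;
}
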
diff --git a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
index 314db8b..66645d7 100644
--- a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
+++ b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
@@ -15,10 +15,10 @@
#define EVENT_LISTENER_COMMON_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/ValueHandle.h"
namespace llvm {
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 2a610d5..b0e985d 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -12,31 +12,33 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
-#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
#include <cstring>
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
STATISTIC(NumGlobals , "Number of global vars initialized");
@@ -50,22 +52,31 @@ ExecutionEngine *(*ExecutionEngine::JITCtor)(
std::string *ErrorStr,
JITMemoryManager *JMM,
bool GVsWithCode,
- TargetMachine *TM) = 0;
+ TargetMachine *TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
Module *M,
std::string *ErrorStr,
RTDyldMemoryManager *MCJMM,
bool GVsWithCode,
- TargetMachine *TM) = 0;
+ TargetMachine *TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(Module *M,
- std::string *ErrorStr) = 0;
+ std::string *ErrorStr) =nullptr;
ExecutionEngine::ExecutionEngine(Module *M)
: EEState(*this),
- LazyFunctionCreator(0) {
+ LazyFunctionCreator(nullptr) {
CompilingLazily = false;
GVCompilationDisabled = false;
SymbolSearchingDisabled = false;
+
+ // IR module verification is enabled by default in debug builds, and disabled
+ // by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+
Modules.push_back(M);
assert(M && "Module is null?");
}
@@ -97,7 +108,7 @@ public:
return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
}
- virtual void deleted() {
+ void deleted() override {
// We allocated with operator new and with some extra memory hanging off the
// end, so don't just delete this. I'm not sure if this is actually
// required.
@@ -111,6 +122,10 @@ char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
return GVMemoryBlock::Create(GV, *getDataLayout());
}
+void ExecutionEngine::addObjectFile(std::unique_ptr<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
bool ExecutionEngine::removeModule(Module *M) {
for(SmallVectorImpl<Module *>::iterator I = Modules.begin(),
E = Modules.end(); I != E; ++I) {
@@ -129,19 +144,18 @@ Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
if (Function *F = Modules[i]->getFunction(FnName))
return F;
}
- return 0;
+ return nullptr;
}
-void *ExecutionEngineState::RemoveMapping(const MutexGuard &,
- const GlobalValue *ToUnmap) {
+void *ExecutionEngineState::RemoveMapping(const GlobalValue *ToUnmap) {
GlobalAddressMapTy::iterator I = GlobalAddressMap.find(ToUnmap);
void *OldVal;
// FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
// GlobalAddressMap.
if (I == GlobalAddressMap.end())
- OldVal = 0;
+ OldVal = nullptr;
else {
OldVal = I->second;
GlobalAddressMap.erase(I);
@@ -156,15 +170,15 @@ void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
DEBUG(dbgs() << "JIT: Map \'" << GV->getName()
<< "\' to [" << Addr << "]\n";);
- void *&CurVal = EEState.getGlobalAddressMap(locked)[GV];
- assert((CurVal == 0 || Addr == 0) && "GlobalMapping already established!");
+ void *&CurVal = EEState.getGlobalAddressMap()[GV];
+ assert((!CurVal || !Addr) && "GlobalMapping already established!");
CurVal = Addr;
// If we are using the reverse mapping, add it too.
- if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
AssertingVH<const GlobalValue> &V =
- EEState.getGlobalAddressReverseMap(locked)[Addr];
- assert((V == 0 || GV == 0) && "GlobalMapping already established!");
+ EEState.getGlobalAddressReverseMap()[Addr];
+ assert((!V || !GV) && "GlobalMapping already established!");
V = GV;
}
}
@@ -172,42 +186,42 @@ void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
void ExecutionEngine::clearAllGlobalMappings() {
MutexGuard locked(lock);
- EEState.getGlobalAddressMap(locked).clear();
- EEState.getGlobalAddressReverseMap(locked).clear();
+ EEState.getGlobalAddressMap().clear();
+ EEState.getGlobalAddressReverseMap().clear();
}
void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
MutexGuard locked(lock);
for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI)
- EEState.RemoveMapping(locked, FI);
+ EEState.RemoveMapping(FI);
for (Module::global_iterator GI = M->global_begin(), GE = M->global_end();
GI != GE; ++GI)
- EEState.RemoveMapping(locked, GI);
+ EEState.RemoveMapping(GI);
}
void *ExecutionEngine::updateGlobalMapping(const GlobalValue *GV, void *Addr) {
MutexGuard locked(lock);
ExecutionEngineState::GlobalAddressMapTy &Map =
- EEState.getGlobalAddressMap(locked);
+ EEState.getGlobalAddressMap();
// Deleting from the mapping?
- if (Addr == 0)
- return EEState.RemoveMapping(locked, GV);
+ if (!Addr)
+ return EEState.RemoveMapping(GV);
void *&CurVal = Map[GV];
void *OldVal = CurVal;
- if (CurVal && !EEState.getGlobalAddressReverseMap(locked).empty())
- EEState.getGlobalAddressReverseMap(locked).erase(CurVal);
+ if (CurVal && !EEState.getGlobalAddressReverseMap().empty())
+ EEState.getGlobalAddressReverseMap().erase(CurVal);
CurVal = Addr;
// If we are using the reverse mapping, add it too.
- if (!EEState.getGlobalAddressReverseMap(locked).empty()) {
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
AssertingVH<const GlobalValue> &V =
- EEState.getGlobalAddressReverseMap(locked)[Addr];
- assert((V == 0 || GV == 0) && "GlobalMapping already established!");
+ EEState.getGlobalAddressReverseMap()[Addr];
+ assert((!V || !GV) && "GlobalMapping already established!");
V = GV;
}
return OldVal;
@@ -217,25 +231,25 @@ void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
MutexGuard locked(lock);
ExecutionEngineState::GlobalAddressMapTy::iterator I =
- EEState.getGlobalAddressMap(locked).find(GV);
- return I != EEState.getGlobalAddressMap(locked).end() ? I->second : 0;
+ EEState.getGlobalAddressMap().find(GV);
+ return I != EEState.getGlobalAddressMap().end() ? I->second : nullptr;
}
const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
MutexGuard locked(lock);
// If we haven't computed the reverse mapping yet, do so first.
- if (EEState.getGlobalAddressReverseMap(locked).empty()) {
+ if (EEState.getGlobalAddressReverseMap().empty()) {
for (ExecutionEngineState::GlobalAddressMapTy::iterator
- I = EEState.getGlobalAddressMap(locked).begin(),
- E = EEState.getGlobalAddressMap(locked).end(); I != E; ++I)
- EEState.getGlobalAddressReverseMap(locked).insert(std::make_pair(
+ I = EEState.getGlobalAddressMap().begin(),
+ E = EEState.getGlobalAddressMap().end(); I != E; ++I)
+ EEState.getGlobalAddressReverseMap().insert(std::make_pair(
I->second, I->first));
}
std::map<void *, AssertingVH<const GlobalValue> >::iterator I =
- EEState.getGlobalAddressReverseMap(locked).find(Addr);
- return I != EEState.getGlobalAddressReverseMap(locked).end() ? I->second : 0;
+ EEState.getGlobalAddressReverseMap().find(Addr);
+ return I != EEState.getGlobalAddressReverseMap().end() ? I->second : nullptr;
}
namespace {
@@ -243,11 +257,11 @@ class ArgvArray {
char *Array;
std::vector<char*> Values;
public:
- ArgvArray() : Array(NULL) {}
+ ArgvArray() : Array(nullptr) {}
~ArgvArray() { clear(); }
void clear() {
delete[] Array;
- Array = NULL;
+ Array = nullptr;
for (size_t I = 0, E = Values.size(); I != E; ++I) {
delete[] Values[I];
}
@@ -283,7 +297,7 @@ void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
}
// Null terminate it
- EE->StoreValueToMemory(PTOGV(0),
+ EE->StoreValueToMemory(PTOGV(nullptr),
(GenericValue*)(Array+InputArgv.size()*PtrSize),
SBytePtr);
return Array;
@@ -303,11 +317,11 @@ void ExecutionEngine::runStaticConstructorsDestructors(Module *module,
// Should be an array of '{ i32, void ()* }' structs. The first value is
// the init priority, which we ignore.
ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
- if (InitList == 0)
+ if (!InitList)
return;
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
- if (CS == 0) continue;
+ if (!CS) continue;
Constant *FP = CS->getOperand(1);
if (FP->isNullValue())
@@ -397,13 +411,14 @@ ExecutionEngine *ExecutionEngine::create(Module *M,
std::string *ErrorStr,
CodeGenOpt::Level OptLevel,
bool GVsWithCode) {
- EngineBuilder EB = EngineBuilder(M)
- .setEngineKind(ForceInterpreter
- ? EngineKind::Interpreter
- : EngineKind::JIT)
- .setErrorStr(ErrorStr)
- .setOptLevel(OptLevel)
- .setAllocateGVsWithCode(GVsWithCode);
+
+ EngineBuilder EB =
+ EngineBuilder(M)
+ .setEngineKind(ForceInterpreter ? EngineKind::Interpreter
+ : EngineKind::Either)
+ .setErrorStr(ErrorStr)
+ .setOptLevel(OptLevel)
+ .setAllocateGVsWithCode(GVsWithCode);
return EB.create();
}
@@ -418,10 +433,10 @@ ExecutionEngine *ExecutionEngine::createJIT(Module *M,
bool GVsWithCode,
Reloc::Model RM,
CodeModel::Model CMM) {
- if (ExecutionEngine::JITCtor == 0) {
+ if (!ExecutionEngine::JITCtor) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
- return 0;
+ return nullptr;
}
// Use the defaults for extra parameters. Users can use EngineBuilder to
@@ -437,18 +452,39 @@ ExecutionEngine *ExecutionEngine::createJIT(Module *M,
// TODO: permit custom TargetOptions here
TargetMachine *TM = EB.selectTarget();
- if (!TM || (ErrorStr && ErrorStr->length() > 0)) return 0;
+ if (!TM || (ErrorStr && ErrorStr->length() > 0)) return nullptr;
return ExecutionEngine::JITCtor(M, ErrorStr, JMM, GVsWithCode, TM);
}
+void EngineBuilder::InitEngine() {
+ WhichEngine = EngineKind::Either;
+ ErrorStr = nullptr;
+ OptLevel = CodeGenOpt::Default;
+ MCJMM = nullptr;
+ JMM = nullptr;
+ Options = TargetOptions();
+ AllocateGVsWithCode = false;
+ RelocModel = Reloc::Default;
+ CMModel = CodeModel::JITDefault;
+ UseMCJIT = false;
+
+// IR module verification is enabled by default in debug builds, and disabled
+// by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+}
+
ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
- OwningPtr<TargetMachine> TheTM(TM); // Take ownership.
+ std::unique_ptr<TargetMachine> TheTM(TM); // Take ownership.
// Make sure we can resolve symbols in the program as well. The zero arg
// to the function tells DynamicLibrary to load the program, not a library.
- if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr))
- return 0;
+ if (sys::DynamicLibrary::LoadLibraryPermanently(nullptr, ErrorStr))
+ return nullptr;
assert(!(JMM && MCJMM));
@@ -461,7 +497,7 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
else {
if (ErrorStr)
*ErrorStr = "Cannot create an interpreter with a memory manager.";
- return 0;
+ return nullptr;
}
}
@@ -470,7 +506,7 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
*ErrorStr =
"Cannot create a legacy JIT with a runtime dyld memory "
"manager.";
- return 0;
+ return nullptr;
}
// Unless the interpreter was explicitly selected or the JIT is not linked,
@@ -483,16 +519,17 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
<< " a different -march switch.\n";
}
- if (UseMCJIT && ExecutionEngine::MCJITCtor) {
- ExecutionEngine *EE =
- ExecutionEngine::MCJITCtor(M, ErrorStr, MCJMM ? MCJMM : JMM,
- AllocateGVsWithCode, TheTM.take());
- if (EE) return EE;
- } else if (ExecutionEngine::JITCtor) {
- ExecutionEngine *EE =
- ExecutionEngine::JITCtor(M, ErrorStr, JMM,
- AllocateGVsWithCode, TheTM.take());
- if (EE) return EE;
+ ExecutionEngine *EE = nullptr;
+ if (UseMCJIT && ExecutionEngine::MCJITCtor)
+ EE = ExecutionEngine::MCJITCtor(M, ErrorStr, MCJMM ? MCJMM : JMM,
+ AllocateGVsWithCode, TheTM.release());
+ else if (ExecutionEngine::JITCtor)
+ EE = ExecutionEngine::JITCtor(M, ErrorStr, JMM,
+ AllocateGVsWithCode, TheTM.release());
+
+ if (EE) {
+ EE->setVerifyModules(VerifyModules);
+ return EE;
}
}
@@ -503,16 +540,16 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
return ExecutionEngine::InterpCtor(M, ErrorStr);
if (ErrorStr)
*ErrorStr = "Interpreter has not been linked in.";
- return 0;
+ return nullptr;
}
- if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0 &&
- ExecutionEngine::MCJITCtor == 0) {
+ if ((WhichEngine & EngineKind::JIT) && !ExecutionEngine::JITCtor &&
+ !ExecutionEngine::MCJITCtor) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
}
- return 0;
+ return nullptr;
}
void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
@@ -520,7 +557,7 @@ void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
return getPointerToFunction(F);
MutexGuard locked(lock);
- if (void *P = EEState.getGlobalAddressMap(locked)[GV])
+ if (void *P = EEState.getGlobalAddressMap()[GV])
return P;
// Global variable might have been added since interpreter started.
@@ -530,7 +567,7 @@ void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
else
llvm_unreachable("Global hasn't had an address allocated yet!");
- return EEState.getGlobalAddressMap(locked)[GV];
+ return EEState.getGlobalAddressMap()[GV];
}
/// \brief Converts a Constant* into a GenericValue, including handling of
@@ -590,8 +627,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
case Instruction::GetElementPtr: {
// Compute the index
GenericValue Result = getConstantValue(Op0);
- APInt Offset(TD->getPointerSizeInBits(), 0);
- cast<GEPOperator>(CE)->accumulateConstantOffset(*TD, Offset);
+ APInt Offset(DL->getPointerSizeInBits(), 0);
+ cast<GEPOperator>(CE)->accumulateConstantOffset(*DL, Offset);
char* tmp = (char*) Result.PointerVal;
Result = PTOGV(tmp + Offset.getSExtValue());
@@ -678,16 +715,16 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::PtrToInt: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType());
+ uint32_t PtrWidth = DL->getTypeSizeInBits(Op0->getType());
assert(PtrWidth <= 64 && "Bad pointer width");
GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
- uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType());
+ uint32_t IntWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
return GV;
}
case Instruction::IntToPtr: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType());
+ uint32_t PtrWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
@@ -848,7 +885,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
break;
case Type::PointerTyID:
if (isa<ConstantPointerNull>(C))
- Result.PointerVal = 0;
+ Result.PointerVal = nullptr;
else if (const Function *F = dyn_cast<Function>(C))
Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
@@ -1193,33 +1230,29 @@ void ExecutionEngine::emitGlobals() {
if (Modules.size() != 1) {
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
- for (Module::const_global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I) {
- const GlobalValue *GV = I;
- if (GV->hasLocalLinkage() || GV->isDeclaration() ||
- GV->hasAppendingLinkage() || !GV->hasName())
+ for (const auto &GV : M.globals()) {
+ if (GV.hasLocalLinkage() || GV.isDeclaration() ||
+ GV.hasAppendingLinkage() || !GV.hasName())
continue;// Ignore external globals and globals with internal linkage.
const GlobalValue *&GVEntry =
- LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())];
// If this is the first time we've seen this global, it is the canonical
// version.
if (!GVEntry) {
- GVEntry = GV;
+ GVEntry = &GV;
continue;
}
// If the existing global is strong, never replace it.
- if (GVEntry->hasExternalLinkage() ||
- GVEntry->hasDLLImportLinkage() ||
- GVEntry->hasDLLExportLinkage())
+ if (GVEntry->hasExternalLinkage())
continue;
// Otherwise, we know it's linkonce/weak, replace it if this is a strong
// symbol. FIXME is this right for common?
- if (GV->hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
- GVEntry = GV;
+ if (GV.hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
+ GVEntry = &GV;
}
}
}
@@ -1227,31 +1260,30 @@ void ExecutionEngine::emitGlobals() {
std::vector<const GlobalValue*> NonCanonicalGlobals;
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
+ for (const auto &GV : M.globals()) {
// In the multi-module case, see what this global maps to.
if (!LinkedGlobalsMap.empty()) {
if (const GlobalValue *GVEntry =
- LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())]) {
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())]) {
// If something else is the canonical global, ignore this one.
- if (GVEntry != &*I) {
- NonCanonicalGlobals.push_back(I);
+ if (GVEntry != &GV) {
+ NonCanonicalGlobals.push_back(&GV);
continue;
}
}
}
- if (!I->isDeclaration()) {
- addGlobalMapping(I, getMemoryForGV(I));
+ if (!GV.isDeclaration()) {
+ addGlobalMapping(&GV, getMemoryForGV(&GV));
} else {
// External variable reference. Try to use the dynamic loader to
// get a pointer to it.
if (void *SymAddr =
- sys::DynamicLibrary::SearchForAddressOfSymbol(I->getName()))
- addGlobalMapping(I, SymAddr);
+ sys::DynamicLibrary::SearchForAddressOfSymbol(GV.getName()))
+ addGlobalMapping(&GV, SymAddr);
else {
report_fatal_error("Could not resolve external global address: "
- +I->getName());
+ +GV.getName());
}
}
}
@@ -1271,16 +1303,15 @@ void ExecutionEngine::emitGlobals() {
// Now that all of the globals are set up in memory, loop through them all
// and initialize their contents.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- if (!I->isDeclaration()) {
+ for (const auto &GV : M.globals()) {
+ if (!GV.isDeclaration()) {
if (!LinkedGlobalsMap.empty()) {
if (const GlobalValue *GVEntry =
- LinkedGlobalsMap[std::make_pair(I->getName(), I->getType())])
- if (GVEntry != &*I) // Not the canonical variable.
+ LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())])
+ if (GVEntry != &GV) // Not the canonical variable.
continue;
}
- EmitGlobalVariable(I);
+ EmitGlobalVariable(&GV);
}
}
}
@@ -1292,12 +1323,12 @@ void ExecutionEngine::emitGlobals() {
void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
void *GA = getPointerToGlobalIfAvailable(GV);
- if (GA == 0) {
+ if (!GA) {
// If it's not already specified, allocate memory for the global.
GA = getMemoryForGV(GV);
// If we failed to allocate memory for this global, return.
- if (GA == 0) return;
+ if (!GA) return;
addGlobalMapping(GV, GA);
}
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index 2d34eea..6ff1e7a 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "llvm-c/ExecutionEngine.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
@@ -23,17 +22,11 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
// Wrapping the C bindings types.
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
-inline DataLayout *unwrap(LLVMTargetDataRef P) {
- return reinterpret_cast<DataLayout*>(P);
-}
-
-inline LLVMTargetDataRef wrap(const DataLayout *P) {
- return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
-}
-
inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
return reinterpret_cast<TargetLibraryInfo*>(P);
}
@@ -43,6 +36,11 @@ inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) {
return reinterpret_cast<LLVMTargetLibraryInfoRef>(X);
}
+inline LLVMTargetMachineRef wrap(const TargetMachine *P) {
+ return
+ reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P));
+}
+
/*===-- Operations on generic values --------------------------------------===*/
LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
@@ -323,6 +321,11 @@ LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
return wrap(unwrap(EE)->getDataLayout());
}
+LLVMTargetMachineRef
+LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE) {
+ return wrap(unwrap(EE)->getTargetMachine());
+}
+
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
void* Addr) {
unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
@@ -350,17 +353,17 @@ public:
SimpleBindingMemoryManager(const SimpleBindingMMFunctions& Functions,
void *Opaque);
virtual ~SimpleBindingMemoryManager();
-
- virtual uint8_t *allocateCodeSection(
- uintptr_t Size, unsigned Alignment, unsigned SectionID,
- StringRef SectionName);
- virtual uint8_t *allocateDataSection(
- uintptr_t Size, unsigned Alignment, unsigned SectionID,
- StringRef SectionName, bool isReadOnly);
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override;
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool isReadOnly) override;
+
+ bool finalizeMemory(std::string *ErrMsg) override;
- virtual bool finalizeMemory(std::string *ErrMsg);
-
private:
SimpleBindingMMFunctions Functions;
void *Opaque;
@@ -400,7 +403,7 @@ uint8_t *SimpleBindingMemoryManager::allocateDataSection(
}
bool SimpleBindingMemoryManager::finalizeMemory(std::string *ErrMsg) {
- char *errMsgCString = 0;
+ char *errMsgCString = nullptr;
bool result = Functions.FinalizeMemory(Opaque, &errMsgCString);
assert((result || !errMsgCString) &&
"Did not expect an error message if FinalizeMemory succeeded");
@@ -423,7 +426,7 @@ LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
!Destroy)
- return NULL;
+ return nullptr;
SimpleBindingMMFunctions functions;
functions.AllocateCodeSection = AllocateCodeSection;
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
index 7dc295f..4e22a8b 100644
--- a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -15,12 +15,10 @@
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
-#define DEBUG_TYPE "amplifier-jit-event-listener"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
@@ -28,19 +26,21 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Errno.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/ValueHandle.h"
#include "EventListenerCommon.h"
#include "IntelJITEventsWrapper.h"
using namespace llvm;
using namespace llvm::jitprofiling;
+#define DEBUG_TYPE "amplifier-jit-event-listener"
+
namespace {
class IntelJITEventListener : public JITEventListener {
typedef DenseMap<void*, unsigned int> MethodIDMap;
- OwningPtr<IntelJITEventsWrapper> Wrapper;
+ std::unique_ptr<IntelJITEventsWrapper> Wrapper;
MethodIDMap MethodIDs;
FilenameCache Filenames;
@@ -86,7 +86,7 @@ static LineNumberInfo DILineInfoToIntelJITFormat(uintptr_t StartAddress,
LineNumberInfo Result;
Result.Offset = Address - StartAddress;
- Result.LineNumber = Line.getLine();
+ Result.LineNumber = Line.Line;
return Result;
}
@@ -194,11 +194,10 @@ void IntelJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
MethodAddressVector Functions;
// Use symbol info to iterate functions in the object.
- error_code ec;
for (object::symbol_iterator I = Obj.begin_symbols(),
E = Obj.end_symbols();
- I != E && !ec;
- I.increment(ec)) {
+ I != E;
+ ++I) {
std::vector<LineNumberInfo> LineInfo;
std::string SourceFileName;
@@ -234,8 +233,8 @@ void IntelJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
FunctionMessage.line_number_size = 0;
FunctionMessage.line_number_table = 0;
} else {
- SourceFileName = Lines.front().second.getFileName();
- FunctionMessage.source_file_name = (char *)SourceFileName.c_str();
+ SourceFileName = Lines.front().second.FileName;
+ FunctionMessage.source_file_name = const_cast<char *>(SourceFileName.c_str());
FunctionMessage.line_number_size = LineInfo.size();
FunctionMessage.line_number_table = &*LineInfo.begin();
}
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
index f08e287..8d16ee8 100644
--- a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
@@ -164,10 +164,10 @@ typedef struct _iJIT_Method_NIDS
typedef struct _LineNumberInfo
{
- /* x86 Offset from the begining of the method*/
- unsigned int Offset;
-
- /* source line number from the begining of the source file */
+ /* x86 Offset from the beginning of the method*/
+ unsigned int Offset;
+
+ /* source line number from the beginning of the source file */
unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
@@ -191,9 +191,9 @@ typedef struct _iJIT_Method_Load
unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
- unsigned int line_number_size;
-
- /* Pointer to the begining of the line numbers info array */
+ unsigned int line_number_size;
+
+ /* Pointer to the beginning of the line numbers info array */
pLineNumberInfo line_number_table;
/* unique class ID */
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 5de0659..93bb2d1 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -11,23 +11,24 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "interpreter"
#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
+#define DEBUG_TYPE "interpreter"
+
STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
@@ -57,7 +58,7 @@ static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(+, Double);
default:
dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -68,7 +69,7 @@ static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(-, Double);
default:
dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -79,7 +80,7 @@ static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(*, Double);
default:
dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -90,7 +91,7 @@ static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
IMPLEMENT_BINARY_OPERATOR(/, Double);
default:
dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -105,7 +106,7 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
break;
default:
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -142,7 +143,7 @@ static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(==);
default:
dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -156,7 +157,7 @@ static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(!=);
default:
dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -170,7 +171,7 @@ static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<);
default:
dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -184,7 +185,7 @@ static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<);
default:
dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -198,7 +199,7 @@ static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>);
default:
dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -212,7 +213,7 @@ static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>);
default:
dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -226,7 +227,7 @@ static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<=);
default:
dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -240,7 +241,7 @@ static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(<=);
default:
dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -254,7 +255,7 @@ static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>=);
default:
dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -268,7 +269,7 @@ static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_POINTER_ICMP(>=);
default:
dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -293,7 +294,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
default:
dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
SetValue(&I, R, SF);
@@ -329,7 +330,7 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(==);
default:
dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -385,7 +386,7 @@ static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(!=);
default:
dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
// in vector case mask out NaN elements
if (Ty->isVectorTy())
@@ -405,7 +406,7 @@ static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(<=);
default:
dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -419,7 +420,7 @@ static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(>=);
default:
dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -433,7 +434,7 @@ static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(<);
default:
dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -447,7 +448,7 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
IMPLEMENT_VECTOR_FCMP(>);
default:
dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
return Dest;
}
@@ -615,7 +616,7 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
switch (I.getPredicate()) {
default:
dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
break;
@@ -672,7 +673,7 @@ static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
default:
dbgs() << "Unhandled Cmp predicate\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
@@ -726,7 +727,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
switch(I.getOpcode()){
default:
dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
@@ -754,7 +755,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
else {
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
}
break;
@@ -763,7 +764,7 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
switch (I.getOpcode()) {
default:
dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
@@ -896,7 +897,7 @@ void Interpreter::visitSwitchInst(SwitchInst &I) {
GenericValue CondVal = getOperandValue(Cond, SF);
// Check to see if any of the cases match...
- BasicBlock *Dest = 0;
+ BasicBlock *Dest = nullptr;
for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
@@ -979,7 +980,7 @@ void Interpreter::visitAllocaInst(AllocaInst &I) {
<< uintptr_t(Memory) << '\n');
GenericValue Result = PTOGV(Memory);
- assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
+ assert(Result.PointerVal && "Null pointer returned by malloc!");
SetValue(&I, Result, SF);
if (I.getOpcode() == Instruction::Alloca)
@@ -1120,7 +1121,7 @@ void Interpreter::visitCallSite(CallSite CS) {
callFunction((Function*)GVTOP(SRC), ArgVals);
}
-// auxilary function for shift operations
+// auxiliary function for shift operations
static unsigned getShiftAmount(uint64_t orgShiftAmount,
llvm::APInt valueToShift) {
unsigned valueWidth = valueToShift.getBitWidth();
@@ -1732,7 +1733,7 @@ void Interpreter::visitVAArgInst(VAArgInst &I) {
IMPLEMENT_VAARG(Double);
default:
dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
// Set the Value of this Instruction.
@@ -1756,7 +1757,7 @@ void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
default:
dbgs() << "Unhandled destination type for extractelement instruction: "
<< *Ty << "\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
break;
case Type::IntegerTyID:
Dest.IntVal = Src1.AggregateVal[indx].IntVal;
@@ -2073,7 +2074,7 @@ GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
//
void Interpreter::callFunction(Function *F,
const std::vector<GenericValue> &ArgVals) {
- assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
+ assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
ECStack.back().Caller.arg_size() == ArgVals.size()) &&
"Incorrect number of arguments passed into function call!");
// Make a new stack frame... and fill it in.
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index a03c7f5..671bbee 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -98,13 +98,13 @@ static ExFunc lookupFunction(const Function *F) {
sys::ScopedLock Writer(*FunctionsLock);
ExFunc FnPtr = FuncNames[ExtName];
- if (FnPtr == 0)
+ if (!FnPtr)
FnPtr = FuncNames["lle_X_" + F->getName().str()];
- if (FnPtr == 0) // Try calling a generic function... if it exists...
+ if (!FnPtr) // Try calling a generic function... if it exists...
FnPtr = (ExFunc)(intptr_t)
sys::DynamicLibrary::SearchForAddressOfSymbol("lle_X_" +
F->getName().str());
- if (FnPtr != 0)
+ if (FnPtr)
ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
return FnPtr;
}
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
index 9ee9d94..814efcc 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -34,9 +34,12 @@ extern "C" void LLVMLinkInInterpreter() { }
///
ExecutionEngine *Interpreter::create(Module *M, std::string* ErrStr) {
// Tell this Module to materialize everything and release the GVMaterializer.
- if (M->MaterializeAllPermanently(ErrStr))
+ if (std::error_code EC = M->materializeAllPermanently()) {
+ if (ErrStr)
+ *ErrStr = EC.message();
// We got an error, just return 0
- return 0;
+ return nullptr;
+ }
return new Interpreter(M);
}
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
index 98269ef..2145cde 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -16,10 +16,10 @@
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/InstVisitor.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -108,29 +108,29 @@ public:
/// create - Create an interpreter ExecutionEngine. This can never fail.
///
- static ExecutionEngine *create(Module *M, std::string *ErrorStr = 0);
+ static ExecutionEngine *create(Module *M, std::string *ErrorStr = nullptr);
/// run - Start execution with the specified function and arguments.
///
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
+ GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) override;
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true) {
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override {
// FIXME: not implemented.
- return 0;
+ return nullptr;
}
/// recompileAndRelinkFunction - For the interpreter, functions are always
/// up-to-date.
///
- virtual void *recompileAndRelinkFunction(Function *F) {
+ void *recompileAndRelinkFunction(Function *F) override {
return getPointerToFunction(F);
}
/// freeMachineCodeForFunction - The interpreter does not generate any code.
///
- void freeMachineCodeForFunction(Function *F) { }
+ void freeMachineCodeForFunction(Function *F) override { }
// Methods used to execute code:
// Place a call on the stack
@@ -212,8 +212,8 @@ private: // Helper functions
//
void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
- void *getPointerToFunction(Function *F) { return (void*)F; }
- void *getPointerToBasicBlock(BasicBlock *BB) { return (void*)BB; }
+ void *getPointerToFunction(Function *F) override { return (void*)F; }
+ void *getPointerToBasicBlock(BasicBlock *BB) override { return (void*)BB; }
void initializeExecutionEngine() { }
void initializeExternalFunctions();
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index 246a675..83ec978 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -26,6 +26,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
@@ -78,7 +79,7 @@ ExecutionEngine *JIT::createJIT(Module *M,
// Try to register the program as a source of symbols to resolve against.
//
// FIXME: Don't do this here.
- sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
+ sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
// If the target supports JIT code generation, create the JIT.
if (TargetJITInfo *TJ = TM->getJITInfo()) {
@@ -86,7 +87,7 @@ ExecutionEngine *JIT::createJIT(Module *M,
} else {
if (ErrorStr)
*ErrorStr = "target does not support JIT code generation";
- return 0;
+ return nullptr;
}
}
@@ -150,12 +151,13 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
// Add target data
MutexGuard locked(lock);
- FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new DataLayout(*TM.getDataLayout()));
+ FunctionPassManager &PM = jitstate->getPM();
+ M->setDataLayout(TM.getDataLayout());
+ PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory that
// may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, !getVerifyModules())) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -182,12 +184,13 @@ void JIT::addModule(Module *M) {
jitstate = new JITState(M);
- FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new DataLayout(*TM.getDataLayout()));
+ FunctionPassManager &PM = jitstate->getPM();
+ M->setDataLayout(TM.getDataLayout());
+ PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, !getVerifyModules())) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -207,18 +210,19 @@ bool JIT::removeModule(Module *M) {
if (jitstate && jitstate->getModule() == M) {
delete jitstate;
- jitstate = 0;
+ jitstate = nullptr;
}
if (!jitstate && !Modules.empty()) {
jitstate = new JITState(Modules[0]);
- FunctionPassManager &PM = jitstate->getPM(locked);
- PM.add(new DataLayout(*TM.getDataLayout()));
+ FunctionPassManager &PM = jitstate->getPM();
+ M->setDataLayout(TM.getDataLayout());
+ PM.add(new DataLayoutPass(M));
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, !getVerifyModules())) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -349,7 +353,7 @@ GenericValue JIT::runFunction(Function *F,
// currently don't support varargs.
SmallVector<Value*, 8> Args;
for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
- Constant *C = 0;
+ Constant *C = nullptr;
Type *ArgTy = FTy->getParamType(i);
const GenericValue &AV = ArgValues[i];
switch (ArgTy->getTypeID()) {
@@ -402,13 +406,13 @@ GenericValue JIT::runFunction(Function *F,
}
void JIT::RegisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
EventListeners.push_back(L);
}
void JIT::UnregisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
std::vector<JITEventListener*>::reverse_iterator I=
@@ -446,9 +450,8 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
MachineCodeInfo *const MCI;
public:
MCIListener(MachineCodeInfo *mci) : MCI(mci) {}
- virtual void NotifyFunctionEmitted(const Function &,
- void *Code, size_t Size,
- const EmittedFunctionDetails &) {
+ void NotifyFunctionEmitted(const Function &, void *Code, size_t Size,
+ const EmittedFunctionDetails &) override {
MCI->setAddress(Code);
MCI->setSize(Size);
}
@@ -457,41 +460,41 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
if (MCI)
RegisterJITEventListener(&MCIL);
- runJITOnFunctionUnlocked(F, locked);
+ runJITOnFunctionUnlocked(F);
if (MCI)
UnregisterJITEventListener(&MCIL);
}
-void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
+void JIT::runJITOnFunctionUnlocked(Function *F) {
assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
- jitTheFunction(F, locked);
+ jitTheFunctionUnlocked(F);
// If the function referred to another function that had not yet been
// read from bitcode, and we are jitting non-lazily, emit it now.
- while (!jitstate->getPendingFunctions(locked).empty()) {
- Function *PF = jitstate->getPendingFunctions(locked).back();
- jitstate->getPendingFunctions(locked).pop_back();
+ while (!jitstate->getPendingFunctions().empty()) {
+ Function *PF = jitstate->getPendingFunctions().back();
+ jitstate->getPendingFunctions().pop_back();
assert(!PF->hasAvailableExternallyLinkage() &&
"Externally-defined function should not be in pending list.");
- jitTheFunction(PF, locked);
+ jitTheFunctionUnlocked(PF);
// Now that the function has been jitted, ask the JITEmitter to rewrite
// the stub with real address of the function.
- updateFunctionStub(PF);
+ updateFunctionStubUnlocked(PF);
}
}
-void JIT::jitTheFunction(Function *F, const MutexGuard &locked) {
+void JIT::jitTheFunctionUnlocked(Function *F) {
isAlreadyCodeGenerating = true;
- jitstate->getPM(locked).run(*F);
+ jitstate->getPM().run(*F);
isAlreadyCodeGenerating = false;
// clear basic block addresses after this function is done
- getBasicBlockAddressMap(locked).clear();
+ getBasicBlockAddressMap().clear();
}
/// getPointerToFunction - This method is used to get the address of the
@@ -523,7 +526,7 @@ void *JIT::getPointerToFunction(Function *F) {
return Addr;
}
- runJITOnFunctionUnlocked(F, locked);
+ runJITOnFunctionUnlocked(F);
void *Addr = getPointerToGlobalIfAvailable(F);
assert(Addr && "Code generation didn't add function to GlobalAddress table!");
@@ -534,9 +537,9 @@ void JIT::addPointerToBasicBlock(const BasicBlock *BB, void *Addr) {
MutexGuard locked(lock);
BasicBlockAddressMapTy::iterator I =
- getBasicBlockAddressMap(locked).find(BB);
- if (I == getBasicBlockAddressMap(locked).end()) {
- getBasicBlockAddressMap(locked)[BB] = Addr;
+ getBasicBlockAddressMap().find(BB);
+ if (I == getBasicBlockAddressMap().end()) {
+ getBasicBlockAddressMap()[BB] = Addr;
} else {
// ignore repeats: some BBs can be split into few MBBs?
}
@@ -544,7 +547,7 @@ void JIT::addPointerToBasicBlock(const BasicBlock *BB, void *Addr) {
void JIT::clearPointerToBasicBlock(const BasicBlock *BB) {
MutexGuard locked(lock);
- getBasicBlockAddressMap(locked).erase(BB);
+ getBasicBlockAddressMap().erase(BB);
}
void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
@@ -555,8 +558,8 @@ void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
MutexGuard locked(lock);
BasicBlockAddressMapTy::iterator I =
- getBasicBlockAddressMap(locked).find(BB);
- if (I != getBasicBlockAddressMap(locked).end()) {
+ getBasicBlockAddressMap().find(BB);
+ if (I != getBasicBlockAddressMap().end()) {
return I->second;
} else {
llvm_unreachable("JIT does not have BB address for address-of-label, was"
@@ -581,7 +584,7 @@ void *JIT::getPointerToNamedFunction(const std::string &Name,
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
- return 0;
+ return nullptr;
}
@@ -601,7 +604,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
return (void*)&__dso_handle;
#endif
Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(GV->getName());
- if (Ptr == 0) {
+ if (!Ptr) {
report_fatal_error("Could not resolve external global address: "
+GV->getName());
}
@@ -626,10 +629,10 @@ void *JIT::recompileAndRelinkFunction(Function *F) {
void *OldAddr = getPointerToGlobalIfAvailable(F);
// If it's not already compiled there is no reason to patch it up.
- if (OldAddr == 0) { return getPointerToFunction(F); }
+ if (!OldAddr) return getPointerToFunction(F);
// Delete the old function mapping.
- addGlobalMapping(F, 0);
+ addGlobalMapping(F, nullptr);
// Recodegen the function
runJITOnFunction(F);
@@ -685,7 +688,7 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
void JIT::addPendingFunction(Function *F) {
MutexGuard locked(lock);
- jitstate->getPendingFunctions(locked).push_back(F);
+ jitstate->getPendingFunctions().push_back(F);
}
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
index 2ae155b..69a7c36 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
@@ -15,8 +15,8 @@
#define JIT_H
#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/PassManager.h"
-#include "llvm/Support/ValueHandle.h"
namespace llvm {
@@ -39,12 +39,12 @@ private:
public:
explicit JITState(Module *M) : PM(M), M(M) {}
- FunctionPassManager &getPM(const MutexGuard &L) {
+ FunctionPassManager &getPM() {
return PM;
}
Module *getModule() const { return M; }
- std::vector<AssertingVH<Function> > &getPendingFunctions(const MutexGuard &L){
+ std::vector<AssertingVH<Function> > &getPendingFunctions() {
return PendingFunctions;
}
};
@@ -106,16 +106,16 @@ public:
RM, CMM);
}
- virtual void addModule(Module *M);
+ void addModule(Module *M) override;
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
- virtual bool removeModule(Module *M);
+ bool removeModule(Module *M) override;
/// runFunction - Start execution with the specified function and arguments.
///
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
+ GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) override;
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the MemoryManager. As such it is only
@@ -125,8 +125,8 @@ public:
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override;
// CompilationCallback - Invoked the first time that a call site is found,
// which causes lazy compilation of the target function.
@@ -136,7 +136,7 @@ public:
/// getPointerToFunction - This returns the address of the specified function,
/// compiling it if necessary.
///
- void *getPointerToFunction(Function *F);
+ void *getPointerToFunction(Function *F) override;
/// addPointerToBasicBlock - Adds address of the specific basic block.
void addPointerToBasicBlock(const BasicBlock *BB, void *Addr);
@@ -146,18 +146,18 @@ public:
/// getPointerToBasicBlock - This returns the address of the specified basic
/// block, assuming function is compiled.
- void *getPointerToBasicBlock(BasicBlock *BB);
+ void *getPointerToBasicBlock(BasicBlock *BB) override;
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
- void *getOrEmitGlobalVariable(const GlobalVariable *GV);
+ void *getOrEmitGlobalVariable(const GlobalVariable *GV) override;
/// getPointerToFunctionOrStub - If the specified function has been
/// code-gen'd, return a pointer to the function. If not, compile it, or use
/// a stub to implement lazy compilation if available.
///
- void *getPointerToFunctionOrStub(Function *F);
+ void *getPointerToFunctionOrStub(Function *F) override;
/// recompileAndRelinkFunction - This method is used to force a function
/// which has already been compiled, to be compiled again, possibly
@@ -165,12 +165,12 @@ public:
/// with a branch to the new copy. If there was no old copy, this acts
/// just like JIT::getPointerToFunction().
///
- void *recompileAndRelinkFunction(Function *F);
+ void *recompileAndRelinkFunction(Function *F) override;
/// freeMachineCodeForFunction - deallocate memory used to code-generate this
/// Function.
///
- void freeMachineCodeForFunction(Function *F);
+ void freeMachineCodeForFunction(Function *F) override;
/// addPendingFunction - while jitting non-lazily, a called but non-codegen'd
/// function was encountered. Add it to a pending list to be processed after
@@ -189,10 +189,13 @@ public:
TargetMachine *TM);
// Run the JIT on F and return information about the generated code
- void runJITOnFunction(Function *F, MachineCodeInfo *MCI = 0);
+ void runJITOnFunction(Function *F, MachineCodeInfo *MCI = nullptr) override;
+
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
+
+ TargetMachine *getTargetMachine() override { return &TM; }
- virtual void RegisterJITEventListener(JITEventListener *L);
- virtual void UnregisterJITEventListener(JITEventListener *L);
/// These functions correspond to the methods on JITEventListener. They
/// iterate over the registered listeners and call the corresponding method on
/// each.
@@ -202,7 +205,7 @@ public:
void NotifyFreeingMachineCode(void *OldPtr);
BasicBlockAddressMapTy &
- getBasicBlockAddressMap(const MutexGuard &) {
+ getBasicBlockAddressMap() {
return BasicBlockAddressMap;
}
@@ -210,14 +213,14 @@ public:
private:
static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
TargetMachine &tm);
- void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
- void updateFunctionStub(Function *F);
- void jitTheFunction(Function *F, const MutexGuard &locked);
+ void runJITOnFunctionUnlocked(Function *F);
+ void updateFunctionStubUnlocked(Function *F);
+ void jitTheFunctionUnlocked(Function *F);
protected:
/// getMemoryforGV - Allocate memory for a global variable.
- virtual char* getMemoryForGV(const GlobalVariable* GV);
+ char* getMemoryForGV(const GlobalVariable* GV) override;
};
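
Most of the JIT.h churn above is the mechanical C++11 cleanup of marking overriding declarations with override instead of repeating virtual, which turns a mismatched name or signature into a compile error. A generic sketch of the idiom (the types below are placeholders, not LLVM classes):

struct Engine {
  virtual ~Engine() {}
  virtual void *getPointerToFunction(int Id) = 0;
};

struct JITLike : Engine {
  // 'override' asks the compiler to check that this really overrides a
  // base-class virtual; change the name or signature and it fails to build.
  void *getPointerToFunction(int Id) override { (void)Id; return nullptr; }
};
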
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index acbbfa1..2ba1f86 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -12,14 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "JIT.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -27,21 +24,22 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/DebugInfo.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Disassembler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h"
@@ -53,6 +51,8 @@
#endif
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");
STATISTIC(NumRetries, "Number of retries with more memory");
@@ -120,21 +120,16 @@ namespace {
#endif
}
- FunctionToLazyStubMapTy& getFunctionToLazyStubMap(
- const MutexGuard& locked) {
- assert(locked.holds(TheJIT->lock));
+ FunctionToLazyStubMapTy& getFunctionToLazyStubMap() {
return FunctionToLazyStubMap;
}
- GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& lck) {
- assert(lck.holds(TheJIT->lock));
+ GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap() {
return GlobalToIndirectSymMap;
}
std::pair<void *, Function *> LookupFunctionFromCallSite(
- const MutexGuard &locked, void *CallSite) const {
- assert(locked.holds(TheJIT->lock));
-
+ void *CallSite) const {
// The address given to us for the stub may not be exactly right, it
// might be a little bit after the stub. As such, use upper_bound to
// find it.
@@ -146,9 +141,7 @@ namespace {
return *I;
}
- void AddCallSite(const MutexGuard &locked, void *CallSite, Function *F) {
- assert(locked.holds(TheJIT->lock));
-
+ void AddCallSite(void *CallSite, Function *F) {
bool Inserted = CallSiteToFunctionMap.insert(
std::make_pair(CallSite, F)).second;
(void)Inserted;
@@ -344,7 +337,8 @@ namespace {
void *FunctionBody; // Beginning of the function's allocation.
void *Code; // The address the function's code actually starts at.
void *ExceptionTable;
- EmittedCode() : FunctionBody(0), Code(0), ExceptionTable(0) {}
+ EmittedCode() : FunctionBody(nullptr), Code(nullptr),
+ ExceptionTable(nullptr) {}
};
struct EmittedFunctionConfig : public ValueMapConfig<const Function*> {
typedef JITEmitter *ExtraData;
@@ -361,7 +355,7 @@ namespace {
public:
JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
- : SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
+ : SizeEstimate(0), Resolver(jit, *this), MMI(nullptr), CurFn(nullptr),
EmittedFunctions(this), TheJIT(&jit) {
MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
if (jit.getJITInfo().needsGOT()) {
@@ -376,8 +370,8 @@ namespace {
JITResolver &getJITResolver() { return Resolver; }
- virtual void startFunction(MachineFunction &F);
- virtual bool finishFunction(MachineFunction &F);
+ void startFunction(MachineFunction &F) override;
+ bool finishFunction(MachineFunction &F) override;
void emitConstantPool(MachineConstantPool *MCP);
void initJumpTableInfo(MachineJumpTableInfo *MJTI);
@@ -387,24 +381,23 @@ namespace {
unsigned StubSize, unsigned Alignment = 1);
void startGVStub(void *Buffer, unsigned StubSize);
void finishGVStub();
- virtual void *allocIndirectGV(const GlobalValue *GV,
- const uint8_t *Buffer, size_t Size,
- unsigned Alignment);
+ void *allocIndirectGV(const GlobalValue *GV, const uint8_t *Buffer,
+ size_t Size, unsigned Alignment) override;
/// allocateSpace - Reserves space in the current block if any, or
/// allocate a new one of the given size.
- virtual void *allocateSpace(uintptr_t Size, unsigned Alignment);
+ void *allocateSpace(uintptr_t Size, unsigned Alignment) override;
/// allocateGlobal - Allocate memory for a global. Unlike allocateSpace,
/// this method does not allocate memory in the current output buffer,
/// because a global may live longer than the current function.
- virtual void *allocateGlobal(uintptr_t Size, unsigned Alignment);
+ void *allocateGlobal(uintptr_t Size, unsigned Alignment) override;
- virtual void addRelocation(const MachineRelocation &MR) {
+ void addRelocation(const MachineRelocation &MR) override {
Relocations.push_back(MR);
}
- virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
+ void StartMachineBasicBlock(MachineBasicBlock *MBB) override {
if (MBBLocations.size() <= (unsigned)MBB->getNumber())
MBBLocations.resize((MBB->getNumber()+1)*2);
MBBLocations[MBB->getNumber()] = getCurrentPCValue();
@@ -415,10 +408,11 @@ namespace {
<< (void*) getCurrentPCValue() << "]\n");
}
- virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
- virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
+ uintptr_t getConstantPoolEntryAddress(unsigned Entry) const override;
+ uintptr_t getJumpTableEntryAddress(unsigned Entry) const override;
- virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const{
+ uintptr_t
+ getMachineBasicBlockAddress(MachineBasicBlock *MBB) const override {
assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
MBBLocations[MBB->getNumber()] && "MBB not emitted!");
return MBBLocations[MBB->getNumber()];
@@ -433,22 +427,22 @@ namespace {
/// function body.
void deallocateMemForFunction(const Function *F);
- virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn);
+ void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) override;
- virtual void emitLabel(MCSymbol *Label) {
+ void emitLabel(MCSymbol *Label) override {
LabelLocations[Label] = getCurrentPCValue();
}
- virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
+ DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() override {
return &LabelLocations;
}
- virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
+ uintptr_t getLabelAddress(MCSymbol *Label) const override {
assert(LabelLocations.count(Label) && "Label not emitted!");
return LabelLocations.find(Label)->second;
}
- virtual void setModuleInfo(MachineModuleInfo* Info) {
+ void setModuleInfo(MachineModuleInfo* Info) override {
MMI = Info;
}
@@ -502,7 +496,7 @@ void *JITResolver::getLazyFunctionStubIfAvailable(Function *F) {
MutexGuard locked(TheJIT->lock);
// If we already have a stub for this function, recycle it.
- return state.getFunctionToLazyStubMap(locked).lookup(F);
+ return state.getFunctionToLazyStubMap().lookup(F);
}
/// getFunctionStub - This returns a pointer to a function stub, creating
@@ -511,13 +505,13 @@ void *JITResolver::getLazyFunctionStub(Function *F) {
MutexGuard locked(TheJIT->lock);
// If we already have a lazy stub for this function, recycle it.
- void *&Stub = state.getFunctionToLazyStubMap(locked)[F];
+ void *&Stub = state.getFunctionToLazyStubMap()[F];
if (Stub) return Stub;
// Call the lazy resolver function if we are JIT'ing lazily. Otherwise we
// must resolve the symbol now.
void *Actual = TheJIT->isCompilingLazily()
- ? (void *)(intptr_t)LazyResolverFn : (void *)0;
+ ? (void *)(intptr_t)LazyResolverFn : (void *)nullptr;
// If this is an external declaration, attempt to resolve the address now
// to place in the stub.
@@ -526,7 +520,7 @@ void *JITResolver::getLazyFunctionStub(Function *F) {
// If we resolved the symbol to a null address (eg. a weak external)
// don't emit a stub. Return a null pointer to the application.
- if (!Actual) return 0;
+ if (!Actual) return nullptr;
}
TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
@@ -553,7 +547,7 @@ void *JITResolver::getLazyFunctionStub(Function *F) {
// Finally, keep track of the stub-to-Function mapping so that the
// JITCompilerFn knows which function to compile!
- state.AddCallSite(locked, Stub, F);
+ state.AddCallSite(Stub, F);
} else if (!Actual) {
// If we are JIT'ing non-lazily but need to call a function that does not
// exist yet, add it to the JIT's work list so that we can fill in the
@@ -572,7 +566,7 @@ void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) {
MutexGuard locked(TheJIT->lock);
// If we already have a stub for this global variable, recycle it.
- void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV];
+ void *&IndirectSym = state.getGlobalToIndirectSymMap()[GV];
if (IndirectSym) return IndirectSym;
// Otherwise, codegen a new indirect symbol.
@@ -593,8 +587,8 @@ void *JITResolver::getExternalFunctionStub(void *FnAddr) {
if (Stub) return Stub;
TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
- JE.startGVStub(0, SL.Size, SL.Alignment);
- Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, JE);
+ JE.startGVStub(nullptr, SL.Size, SL.Alignment);
+ Stub = TheJIT->getJITInfo().emitFunctionStub(nullptr, FnAddr, JE);
JE.finishGVStub();
DEBUG(dbgs() << "JIT: Stub emitted at [" << Stub
@@ -620,8 +614,8 @@ void *JITResolver::JITCompilerFn(void *Stub) {
JITResolver *JR = StubToResolverMap->getResolverFromStub(Stub);
assert(JR && "Unable to find the corresponding JITResolver to the call site");
- Function* F = 0;
- void* ActualPtr = 0;
+ Function* F = nullptr;
+ void* ActualPtr = nullptr;
{
// Only lock for getting the Function. The call getPointerToFunction made
@@ -632,7 +626,7 @@ void *JITResolver::JITCompilerFn(void *Stub) {
// The address given to us for the stub may not be exactly right, it might
// be a little bit after the stub. As such, use upper_bound to find it.
std::pair<void*, Function*> I =
- JR->state.LookupFunctionFromCallSite(locked, Stub);
+ JR->state.LookupFunctionFromCallSite(Stub);
F = I.second;
ActualPtr = I.first;
}
@@ -683,13 +677,23 @@ void *JITResolver::JITCompilerFn(void *Stub) {
//===----------------------------------------------------------------------===//
// JITEmitter code.
//
+
+static GlobalObject *getSimpleAliasee(Constant *C) {
+ C = C->stripPointerCasts();
+ return dyn_cast<GlobalObject>(C);
+}
+
void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
bool MayNeedFarStub) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return TheJIT->getOrEmitGlobalVariable(GV);
- if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
- return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ // We can only handle simple cases.
+ if (GlobalValue *GV = getSimpleAliasee(GA->getAliasee()))
+ return TheJIT->getPointerToGlobal(GV);
+ return nullptr;
+ }
// If we have already compiled the function, return a pointer to its body.
Function *F = cast<Function>(V);
@@ -736,7 +740,7 @@ void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
const LLVMContext &Context = EmissionDetails.MF->getFunction()->getContext();
- if (DL.getScope(Context) != 0 && PrevDL != DL) {
+ if (DL.getScope(Context) != nullptr && PrevDL != DL) {
JITEvent_EmittedFunctionDetails::LineStart NextLine;
NextLine.Address = getCurrentPCValue();
NextLine.Loc = DL;
@@ -825,7 +829,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
// Resolve the relocations to concrete pointers.
for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
MachineRelocation &MR = Relocations[i];
- void *ResultPtr = 0;
+ void *ResultPtr = nullptr;
if (!MR.letTargetResolve()) {
if (MR.isExternalSymbol()) {
ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(),
@@ -871,7 +875,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
}
}
- CurFn = 0;
+ CurFn = nullptr;
TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
Relocations.size(), MemMgr->getGOTBase());
}
@@ -900,7 +904,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
SizeEstimate = 0;
}
- BufferBegin = CurBufferPtr = 0;
+ BufferBegin = CurBufferPtr = nullptr;
NumBytes += FnEnd-FnStart;
// Invalidate the icache if necessary.
@@ -924,11 +928,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
MemMgr->setMemoryExecutable();
DEBUG({
- if (sys::hasDisassembler()) {
- dbgs() << "JIT: Disassembled code:\n";
- dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
- (uintptr_t)FnStart);
- } else {
dbgs() << "JIT: Binary code:\n";
uint8_t* q = FnStart;
for (int i = 0; q < FnEnd; q += 4, ++i) {
@@ -950,7 +949,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
dbgs() << '\n';
}
dbgs()<< '\n';
- }
});
if (MMI)
@@ -1018,7 +1016,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
ConstantPoolBase = allocateSpace(Size, Align);
ConstantPool = MCP;
- if (ConstantPoolBase == 0) return; // Buffer overflow.
+ if (!ConstantPoolBase) return; // Buffer overflow.
DEBUG(dbgs() << "JIT: Emitted constant pool at [" << ConstantPoolBase
<< "] (size: " << Size << ", alignment: " << Align << ")\n");
@@ -1074,7 +1072,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
return;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- if (JT.empty() || JumpTableBase == 0) return;
+ if (JT.empty() || !JumpTableBase) return;
switch (MJTI->getEntryKind()) {
@@ -1224,7 +1222,7 @@ void *JIT::getPointerToFunctionOrStub(Function *F) {
return JE->getJITResolver().getLazyFunctionStub(F);
}
-void JIT::updateFunctionStub(Function *F) {
+void JIT::updateFunctionStubUnlocked(Function *F) {
// Get the empty stub we generated earlier.
JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
void *Stub = JE->getJITResolver().getLazyFunctionStub(F);
@@ -1244,7 +1242,7 @@ void JIT::updateFunctionStub(Function *F) {
void JIT::freeMachineCodeForFunction(Function *F) {
// Delete translation for this from the ExecutionEngine, so it will get
// retranslated next time it is used.
- updateGlobalMapping(F, 0);
+ updateGlobalMapping(F, nullptr);
// Free the actual memory for the function body and related stuff.
static_cast<JITEmitter*>(JCE)->deallocateMemForFunction(F);
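
The getLazyFunctionStub and getGlobalValueIndirectSym hunks above keep the same caching idiom, now without the lock token: bind a reference to the map slot, return it if it is already populated, otherwise fill it through the reference. A self-contained sketch of that idiom with a plain std::map (the names and the placeholder stub factory are illustrative only):

#include <map>
#include <string>

// Stand-in for stub emission; the real code goes through TargetJITInfo.
static void *createStubFor(const std::string &Name) {
  (void)Name;
  return new char[16]; // placeholder allocation
}

static std::map<std::string, void *> StubMap;

void *getOrCreateStub(const std::string &Name) {
  void *&Stub = StubMap[Name]; // first lookup inserts a null slot
  if (Stub)
    return Stub;               // recycle the existing stub
  Stub = createStubFor(Name);  // writing through the reference caches it
  return Stub;
}
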
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
index f58d31b..584b93f 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "jit"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -40,6 +39,8 @@
using namespace llvm;
+#define DEBUG_TYPE "jit"
+
STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");
JITMemoryManager::~JITMemoryManager() {}
@@ -80,7 +81,7 @@ namespace {
/// getFreeBlockBefore - If the block before this one is free, return it,
/// otherwise return null.
FreeRangeHeader *getFreeBlockBefore() const {
- if (PrevAllocated) return 0;
+ if (PrevAllocated) return nullptr;
intptr_t PrevSize = reinterpret_cast<intptr_t *>(
const_cast<MemoryRangeHeader *>(this))[-1];
return reinterpret_cast<FreeRangeHeader *>(
@@ -174,7 +175,7 @@ FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
// coalesce with it, update our notion of what the free list is.
if (&FollowingFreeBlock == FreeList) {
FreeList = FollowingFreeBlock.Next;
- FreeListToReturn = 0;
+ FreeListToReturn = nullptr;
assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
}
FollowingFreeBlock.RemoveFromFreeList();
@@ -269,13 +270,12 @@ namespace {
class DefaultJITMemoryManager;
- class JITSlabAllocator : public SlabAllocator {
+ class JITAllocator {
DefaultJITMemoryManager &JMM;
public:
- JITSlabAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
- virtual ~JITSlabAllocator() { }
- virtual MemSlab *Allocate(size_t Size);
- virtual void Deallocate(MemSlab *Slab);
+ JITAllocator(DefaultJITMemoryManager &jmm) : JMM(jmm) { }
+ void *Allocate(size_t Size, size_t /*Alignment*/);
+ void Deallocate(void *Slab, size_t Size);
};
/// DefaultJITMemoryManager - Manage memory for the JIT code generation.
@@ -285,7 +285,21 @@ namespace {
/// middle of emitting a function, and we don't know how large the function we
/// are emitting is.
class DefaultJITMemoryManager : public JITMemoryManager {
+ public:
+ /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
+ /// least this much unless more is requested. Currently, in 512k slabs.
+ static const size_t DefaultCodeSlabSize = 512 * 1024;
+
+ /// DefaultSlabSize - Allocate globals and stubs into slabs of 64K (probably
+ /// 16 pages) unless we get an allocation above SizeThreshold.
+ static const size_t DefaultSlabSize = 64 * 1024;
+
+ /// DefaultSizeThreshold - For any allocation larger than 16K (probably
+ /// 4 pages), we should allocate a separate slab to avoid wasted space at
+ /// the end of a normal slab.
+ static const size_t DefaultSizeThreshold = 16 * 1024;
+ private:
// Whether to poison freed memory.
bool PoisonMemory;
@@ -299,9 +313,10 @@ namespace {
// Memory slabs allocated by the JIT. We refer to them as slabs so we don't
// confuse them with the blocks of memory described above.
std::vector<sys::MemoryBlock> CodeSlabs;
- JITSlabAllocator BumpSlabAllocator;
- BumpPtrAllocator StubAllocator;
- BumpPtrAllocator DataAllocator;
+ BumpPtrAllocatorImpl<JITAllocator, DefaultSlabSize,
+ DefaultSizeThreshold> StubAllocator;
+ BumpPtrAllocatorImpl<JITAllocator, DefaultSlabSize,
+ DefaultSizeThreshold> DataAllocator;
// Circular list of free blocks.
FreeRangeHeader *FreeMemoryList;
@@ -318,37 +333,26 @@ namespace {
/// last slab it allocated, so that subsequent allocations follow it.
sys::MemoryBlock allocateNewSlab(size_t size);
- /// DefaultCodeSlabSize - When we have to go map more memory, we allocate at
- /// least this much unless more is requested.
- static const size_t DefaultCodeSlabSize;
-
- /// DefaultSlabSize - Allocate data into slabs of this size unless we get
- /// an allocation above SizeThreshold.
- static const size_t DefaultSlabSize;
-
- /// DefaultSizeThreshold - For any allocation larger than this threshold, we
- /// should allocate a separate slab.
- static const size_t DefaultSizeThreshold;
-
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the dlsym function call.
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override;
- void AllocateGOT();
+ void AllocateGOT() override;
// Testing methods.
- virtual bool CheckInvariants(std::string &ErrorStr);
- size_t GetDefaultCodeSlabSize() { return DefaultCodeSlabSize; }
- size_t GetDefaultDataSlabSize() { return DefaultSlabSize; }
- size_t GetDefaultStubSlabSize() { return DefaultSlabSize; }
- unsigned GetNumCodeSlabs() { return CodeSlabs.size(); }
- unsigned GetNumDataSlabs() { return DataAllocator.GetNumSlabs(); }
- unsigned GetNumStubSlabs() { return StubAllocator.GetNumSlabs(); }
+ bool CheckInvariants(std::string &ErrorStr) override;
+ size_t GetDefaultCodeSlabSize() override { return DefaultCodeSlabSize; }
+ size_t GetDefaultDataSlabSize() override { return DefaultSlabSize; }
+ size_t GetDefaultStubSlabSize() override { return DefaultSlabSize; }
+ unsigned GetNumCodeSlabs() override { return CodeSlabs.size(); }
+ unsigned GetNumDataSlabs() override { return DataAllocator.GetNumSlabs(); }
+ unsigned GetNumStubSlabs() override { return StubAllocator.GetNumSlabs(); }
/// startFunctionBody - When a function starts, allocate a block of free
/// executable memory, returning a pointer to it and its actual size.
- uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
+ uint8_t *startFunctionBody(const Function *F,
+ uintptr_t &ActualSize) override {
FreeRangeHeader* candidateBlock = FreeMemoryList;
FreeRangeHeader* head = FreeMemoryList;
@@ -422,7 +426,7 @@ namespace {
/// endFunctionBody - The function F is now allocated, and takes the memory
/// in the range [FunctionStart,FunctionEnd).
void endFunctionBody(const Function *F, uint8_t *FunctionStart,
- uint8_t *FunctionEnd) {
+ uint8_t *FunctionEnd) override {
assert(FunctionEnd > FunctionStart);
assert(FunctionStart == (uint8_t *)(CurBlock+1) &&
"Mismatched function start/end!");
@@ -435,7 +439,7 @@ namespace {
/// allocateSpace - Allocate a memory block of the given size. This method
/// cannot be called between calls to startFunctionBody and endFunctionBody.
- uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
+ uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) override {
CurBlock = FreeMemoryList;
FreeMemoryList = FreeMemoryList->AllocateBlock();
@@ -453,18 +457,19 @@ namespace {
/// allocateStub - Allocate memory for a function stub.
uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
- unsigned Alignment) {
+ unsigned Alignment) override {
return (uint8_t*)StubAllocator.Allocate(StubSize, Alignment);
}
/// allocateGlobal - Allocate memory for a global.
- uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
+ uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) override {
return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
}
/// allocateCodeSection - Allocate memory for a code section.
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, StringRef SectionName) {
+ unsigned SectionID,
+ StringRef SectionName) override {
// Grow the required block size to account for the block header
Size += sizeof(*CurBlock);
@@ -511,15 +516,15 @@ namespace {
/// allocateDataSection - Allocate memory for a data section.
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID, StringRef SectionName,
- bool IsReadOnly) {
+ bool IsReadOnly) override {
return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
}
- bool finalizeMemory(std::string *ErrMsg) {
+ bool finalizeMemory(std::string *ErrMsg) override {
return false;
}
- uint8_t *getGOTBase() const {
+ uint8_t *getGOTBase() const override {
return GOTBase;
}
@@ -539,57 +544,49 @@ namespace {
/// deallocateFunctionBody - Deallocate all memory for the specified
/// function body.
- void deallocateFunctionBody(void *Body) {
+ void deallocateFunctionBody(void *Body) override {
if (Body) deallocateBlock(Body);
}
/// setMemoryWritable - When code generation is in progress,
/// the code pages may need permissions changed.
- void setMemoryWritable()
- {
+ void setMemoryWritable() override {
for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
sys::Memory::setWritable(CodeSlabs[i]);
}
/// setMemoryExecutable - When code generation is done and we're ready to
/// start execution, the code pages may need permissions changed.
- void setMemoryExecutable()
- {
+ void setMemoryExecutable() override {
for (unsigned i = 0, e = CodeSlabs.size(); i != e; ++i)
sys::Memory::setExecutable(CodeSlabs[i]);
}
/// setPoisonMemory - Controls whether we write garbage over freed memory.
///
- void setPoisonMemory(bool poison) {
+ void setPoisonMemory(bool poison) override {
PoisonMemory = poison;
}
};
}
-MemSlab *JITSlabAllocator::Allocate(size_t Size) {
+void *JITAllocator::Allocate(size_t Size, size_t /*Alignment*/) {
sys::MemoryBlock B = JMM.allocateNewSlab(Size);
- MemSlab *Slab = (MemSlab*)B.base();
- Slab->Size = B.size();
- Slab->NextPtr = 0;
- return Slab;
+ return B.base();
}
-void JITSlabAllocator::Deallocate(MemSlab *Slab) {
- sys::MemoryBlock B(Slab, Slab->Size);
+void JITAllocator::Deallocate(void *Slab, size_t Size) {
+ sys::MemoryBlock B(Slab, Size);
sys::Memory::ReleaseRWX(B);
}
DefaultJITMemoryManager::DefaultJITMemoryManager()
- :
+ :
#ifdef NDEBUG
- PoisonMemory(false),
+ PoisonMemory(false),
#else
- PoisonMemory(true),
+ PoisonMemory(true),
#endif
- LastSlab(0, 0),
- BumpSlabAllocator(*this),
- StubAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator),
- DataAllocator(DefaultSlabSize, DefaultSizeThreshold, BumpSlabAllocator) {
+ LastSlab(nullptr, 0), StubAllocator(*this), DataAllocator(*this) {
// Allocate space for code.
sys::MemoryBlock MemBlock = allocateNewSlab(DefaultCodeSlabSize);
@@ -642,11 +639,11 @@ DefaultJITMemoryManager::DefaultJITMemoryManager()
// Start out with the freelist pointing to Mem0.
FreeMemoryList = Mem0;
- GOTBase = NULL;
+ GOTBase = nullptr;
}
void DefaultJITMemoryManager::AllocateGOT() {
- assert(GOTBase == 0 && "Cannot allocate the got multiple times");
+ assert(!GOTBase && "Cannot allocate the got multiple times");
GOTBase = new uint8_t[sizeof(void*) * 8192];
HasGOT = true;
}
@@ -661,9 +658,9 @@ DefaultJITMemoryManager::~DefaultJITMemoryManager() {
sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
// Allocate a new block close to the last one.
std::string ErrMsg;
- sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
+ sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : nullptr;
sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
- if (B.base() == 0) {
+ if (!B.base()) {
report_fatal_error("Allocation failed when allocating new memory in the"
" JIT\n" + Twine(ErrMsg));
}
@@ -724,7 +721,7 @@ bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
char *End = Start + I->size();
// Check each memory range.
- for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = NULL;
+ for (MemoryRangeHeader *Hdr = (MemoryRangeHeader*)Start, *LastHdr = nullptr;
Start <= (char*)Hdr && (char*)Hdr < End;
Hdr = &Hdr->getBlockAfter()) {
if (Hdr->ThisAllocated == 0) {
@@ -893,7 +890,7 @@ void *DefaultJITMemoryManager::getPointerToNamedFunction(const std::string &Name
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
- return 0;
+ return nullptr;
}
@@ -902,11 +899,6 @@ JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
return new DefaultJITMemoryManager();
}
-// Allocate memory for code in 512K slabs.
-const size_t DefaultJITMemoryManager::DefaultCodeSlabSize = 512 * 1024;
-
-// Allocate globals and stubs in slabs of 64K. (probably 16 pages)
-const size_t DefaultJITMemoryManager::DefaultSlabSize = 64 * 1024;
-
-// Waste at most 16K at the end of each bump slab. (probably 4 pages)
-const size_t DefaultJITMemoryManager::DefaultSizeThreshold = 16 * 1024;
+const size_t DefaultJITMemoryManager::DefaultCodeSlabSize;
+const size_t DefaultJITMemoryManager::DefaultSlabSize;
+const size_t DefaultJITMemoryManager::DefaultSizeThreshold;
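
The tail of the JITMemoryManager.cpp hunk moves the slab-size constants into the class as in-class initializers and leaves bare out-of-class definitions behind. That is the standard pre-C++17 pattern: the value lives in the class, but one translation unit still needs a definition in case the constant is odr-used. A compilable sketch (the class name is a placeholder):

#include <cstddef>

struct MemoryManagerLike {
  // Values now live in the class definition, as in the hunk above.
  static const std::size_t DefaultCodeSlabSize = 512 * 1024;
  static const std::size_t DefaultSlabSize = 64 * 1024;
};

// Still required (without an initializer) in one .cpp file if the constants
// are odr-used, e.g. bound to a const reference; C++17 inline variables
// later made these definitions unnecessary.
const std::size_t MemoryManagerLike::DefaultCodeSlabSize;
const std::size_t MemoryManagerLike::DefaultSlabSize;

std::size_t pickSlabSize(bool Code) {
  return Code ? MemoryManagerLike::DefaultCodeSlabSize
              : MemoryManagerLike::DefaultSlabSize;
}
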
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 195c458..e9ba96a 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -14,17 +14,20 @@
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
-#include "llvm/PassManager.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MutexGuard.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
@@ -47,7 +50,7 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
// Try to register the program as a source of symbols to resolve against.
//
// FIXME: Don't do this here.
- sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
+ sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
return new MCJIT(M, TM, MemMgr ? MemMgr : new SectionMemoryManager(),
GVsWithCode);
@@ -55,8 +58,8 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM,
bool AllocateGVsWithCode)
- : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(this, MM), Dyld(&MemMgr),
- ObjCache(0) {
+ : ExecutionEngine(m), TM(tm), Ctx(nullptr), MemMgr(this, MM), Dyld(&MemMgr),
+ ObjCache(nullptr) {
OwnedModules.addModule(m);
setDataLayout(TM->getDataLayout());
@@ -77,15 +80,24 @@ MCJIT::~MCJIT() {
Modules.clear();
Dyld.deregisterEHFrames();
- LoadedObjectMap::iterator it, end = LoadedObjects.end();
- for (it = LoadedObjects.begin(); it != end; ++it) {
- ObjectImage *Obj = it->second;
+ LoadedObjectList::iterator it, end;
+ for (it = LoadedObjects.begin(), end = LoadedObjects.end(); it != end; ++it) {
+ ObjectImage *Obj = *it;
if (Obj) {
NotifyFreeingObject(*Obj);
delete Obj;
}
}
LoadedObjects.clear();
+
+
+ SmallVector<object::Archive *, 2>::iterator ArIt, ArEnd;
+ for (ArIt = Archives.begin(), ArEnd = Archives.end(); ArIt != ArEnd; ++ArIt) {
+ object::Archive *A = *ArIt;
+ delete A;
+ }
+ Archives.clear();
+
delete TM;
}
@@ -101,6 +113,21 @@ bool MCJIT::removeModule(Module *M) {
+void MCJIT::addObjectFile(std::unique_ptr<object::ObjectFile> Obj) {
+ ObjectImage *LoadedObject = Dyld.loadObject(std::move(Obj));
+ if (!LoadedObject || Dyld.hasError())
+ report_fatal_error(Dyld.getErrorString());
+
+ LoadedObjects.push_back(LoadedObject);
+
+ NotifyObjectEmitted(*LoadedObject);
+}
+
+void MCJIT::addArchive(object::Archive *A) {
+ Archives.push_back(A);
+}
+
+
void MCJIT::setObjectCache(ObjectCache* NewCache) {
MutexGuard locked(lock);
ObjCache = NewCache;
@@ -115,14 +142,16 @@ ObjectBufferStream* MCJIT::emitObject(Module *M) {
PassManager PM;
- PM.add(new DataLayout(*TM->getDataLayout()));
+ M->setDataLayout(TM->getDataLayout());
+ PM.add(new DataLayoutPass(M));
// The RuntimeDyld will take ownership of this shortly
- OwningPtr<ObjectBufferStream> CompiledObject(new ObjectBufferStream());
+ std::unique_ptr<ObjectBufferStream> CompiledObject(new ObjectBufferStream());
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM->addPassesToEmitMC(PM, Ctx, CompiledObject->getOStream(), false)) {
+ if (TM->addPassesToEmitMC(PM, Ctx, CompiledObject->getOStream(),
+ !getVerifyModules())) {
report_fatal_error("Target does not support MC emission!");
}
@@ -136,11 +165,11 @@ ObjectBufferStream* MCJIT::emitObject(Module *M) {
if (ObjCache) {
// MemoryBuffer is a thin wrapper around the actual memory, so it's OK
// to create a temporary object here and delete it after the call.
- OwningPtr<MemoryBuffer> MB(CompiledObject->getMemBuffer());
+ std::unique_ptr<MemoryBuffer> MB(CompiledObject->getMemBuffer());
ObjCache->notifyObjectCompiled(M, MB.get());
}
- return CompiledObject.take();
+ return CompiledObject.release();
}
void MCJIT::generateCodeForModule(Module *M) {
@@ -155,12 +184,12 @@ void MCJIT::generateCodeForModule(Module *M) {
if (OwnedModules.hasModuleBeenLoaded(M))
return;
- OwningPtr<ObjectBuffer> ObjectToLoad;
+ std::unique_ptr<ObjectBuffer> ObjectToLoad;
// Try to load the pre-compiled object from cache if possible
- if (0 != ObjCache) {
- OwningPtr<MemoryBuffer> PreCompiledObject(ObjCache->getObject(M));
- if (0 != PreCompiledObject.get())
- ObjectToLoad.reset(new ObjectBuffer(PreCompiledObject.take()));
+ if (ObjCache) {
+ std::unique_ptr<MemoryBuffer> PreCompiledObject(ObjCache->getObject(M));
+ if (PreCompiledObject.get())
+ ObjectToLoad.reset(new ObjectBuffer(PreCompiledObject.release()));
}
// If the cache did not contain a suitable object, compile the object
@@ -170,9 +199,9 @@ void MCJIT::generateCodeForModule(Module *M) {
}
// Load the object into the dynamic linker.
- // MCJIT now owns the ObjectImage pointer (via its LoadedObjects map).
- ObjectImage *LoadedObject = Dyld.loadObject(ObjectToLoad.take());
- LoadedObjects[M] = LoadedObject;
+ // MCJIT now owns the ObjectImage pointer (via its LoadedObjects list).
+ ObjectImage *LoadedObject = Dyld.loadObject(ObjectToLoad.release());
+ LoadedObjects.push_back(LoadedObject);
if (!LoadedObject)
report_fatal_error(Dyld.getErrorString());
@@ -231,11 +260,10 @@ void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
}
uint64_t MCJIT::getExistingSymbolAddress(const std::string &Name) {
- // Check with the RuntimeDyld to see if we already have this symbol.
- if (Name[0] == '\1')
- return Dyld.getSymbolLoadAddress(Name.substr(1));
- return Dyld.getSymbolLoadAddress((TM->getMCAsmInfo()->getGlobalPrefix()
- + Name));
+ Mangler Mang(TM->getDataLayout());
+ SmallString<128> FullName;
+ Mang.getNameWithPrefix(FullName, Name);
+ return Dyld.getSymbolLoadAddress(FullName);
}
Module *MCJIT::findModuleForSymbol(const std::string &Name,
@@ -258,7 +286,7 @@ Module *MCJIT::findModuleForSymbol(const std::string &Name,
}
}
// We didn't find the symbol in any of our modules.
- return NULL;
+ return nullptr;
}
uint64_t MCJIT::getSymbolAddress(const std::string &Name,
@@ -271,6 +299,31 @@ uint64_t MCJIT::getSymbolAddress(const std::string &Name,
if (Addr)
return Addr;
+ SmallVector<object::Archive*, 2>::iterator I, E;
+ for (I = Archives.begin(), E = Archives.end(); I != E; ++I) {
+ object::Archive *A = *I;
+ // Look for our symbols in each Archive
+ object::Archive::child_iterator ChildIt = A->findSym(Name);
+ if (ChildIt != A->child_end()) {
+ // FIXME: Support nested archives?
+ ErrorOr<std::unique_ptr<object::Binary>> ChildBinOrErr =
+ ChildIt->getAsBinary();
+ if (ChildBinOrErr.getError())
+ continue;
+ std::unique_ptr<object::Binary> ChildBin = std::move(ChildBinOrErr.get());
+ if (ChildBin->isObject()) {
+ std::unique_ptr<object::ObjectFile> OF(
+ static_cast<object::ObjectFile *>(ChildBin.release()));
+ // This causes the object file to be loaded.
+ addObjectFile(std::move(OF));
+ // The address should be here now.
+ Addr = getExistingSymbolAddress(Name);
+ if (Addr)
+ return Addr;
+ }
+ }
+ }
+
// If it hasn't already been generated, see if it's in one of our modules.
Module *M = findModuleForSymbol(Name, CheckFunctionsOnly);
if (!M)
@@ -317,18 +370,16 @@ void *MCJIT::getPointerToFunction(Function *F) {
generateCodeForModule(M);
else if (!OwnedModules.hasModuleBeenLoaded(M))
// If this function doesn't belong to one of our modules, we're done.
- return NULL;
+ return nullptr;
// FIXME: Should the Dyld be retaining module information? Probably not.
- // FIXME: Should we be using the mangler for this? Probably.
//
// This is the accessor for the target address, so make sure to check the
// load address of the symbol, not the local address.
- StringRef BaseName = F->getName();
- if (BaseName[0] == '\1')
- return (void*)Dyld.getSymbolLoadAddress(BaseName.substr(1));
- return (void*)Dyld.getSymbolLoadAddress((TM->getMCAsmInfo()->getGlobalPrefix()
- + BaseName).str());
+ Mangler Mang(TM->getDataLayout());
+ SmallString<128> Name;
+ TM->getNameWithPrefix(Name, F, Mang);
+ return (void*)Dyld.getSymbolLoadAddress(Name);
}
void *MCJIT::recompileAndRelinkFunction(Function *F) {
@@ -363,7 +414,7 @@ Function *MCJIT::FindFunctionNamedInModulePtrSet(const char *FnName,
if (Function *F = (*I)->getFunction(FnName))
return F;
}
- return 0;
+ return nullptr;
}
Function *MCJIT::FindFunctionNamed(const char *FnName) {
@@ -495,17 +546,17 @@ void *MCJIT::getPointerToNamedFunction(const std::string &Name,
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
- return 0;
+ return nullptr;
}
void MCJIT::RegisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
EventListeners.push_back(L);
}
void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
- if (L == NULL)
+ if (!L)
return;
MutexGuard locked(lock);
SmallVector<JITEventListener*, 2>::reverse_iterator I=
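
Several MCJIT.cpp hunks above swap OwningPtr for std::unique_ptr, so ownership hand-off changes from take() to release() while the surrounding logic stays the same. A minimal sketch of that hand-off to an interface that still expects a raw owning pointer (Buffer and consumeRaw are placeholders, not LLVM APIs):

#include <memory>

struct Buffer { int Data = 0; };

// Legacy-style sink that takes ownership through a raw pointer, much like
// RuntimeDyld::loadObject still does at this point in the tree.
void consumeRaw(Buffer *B) { delete B; }

void handOff() {
  std::unique_ptr<Buffer> Owned(new Buffer()); // was OwningPtr<Buffer>
  consumeRaw(Owned.release());                 // was Owned.take()
}
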
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
index 86b478b..100e9a2 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -31,43 +31,53 @@ public:
LinkingMemoryManager(MCJIT *Parent, RTDyldMemoryManager *MM)
: ParentEngine(Parent), ClientMM(MM) {}
- virtual uint64_t getSymbolAddress(const std::string &Name);
+ uint64_t getSymbolAddress(const std::string &Name) override;
// Functions deferred to client memory manager
- virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, StringRef SectionName) {
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override {
return ClientMM->allocateCodeSection(Size, Alignment, SectionID, SectionName);
}
- virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
- unsigned SectionID, StringRef SectionName,
- bool IsReadOnly) {
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool IsReadOnly) override {
return ClientMM->allocateDataSection(Size, Alignment,
SectionID, SectionName, IsReadOnly);
}
- virtual void notifyObjectLoaded(ExecutionEngine *EE,
- const ObjectImage *Obj) {
+ void reserveAllocationSpace(uintptr_t CodeSize, uintptr_t DataSizeRO,
+ uintptr_t DataSizeRW) override {
+ return ClientMM->reserveAllocationSpace(CodeSize, DataSizeRO, DataSizeRW);
+ }
+
+ bool needsToReserveAllocationSpace() override {
+ return ClientMM->needsToReserveAllocationSpace();
+ }
+
+ void notifyObjectLoaded(ExecutionEngine *EE,
+ const ObjectImage *Obj) override {
ClientMM->notifyObjectLoaded(EE, Obj);
}
- virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) {
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
ClientMM->registerEHFrames(Addr, LoadAddr, Size);
}
- virtual void deregisterEHFrames(uint8_t *Addr,
- uint64_t LoadAddr,
- size_t Size) {
+ void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) override {
ClientMM->deregisterEHFrames(Addr, LoadAddr, Size);
}
- virtual bool finalizeMemory(std::string *ErrMsg = 0) {
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override {
return ClientMM->finalizeMemory(ErrMsg);
}
private:
MCJIT *ParentEngine;
- OwningPtr<RTDyldMemoryManager> ClientMM;
+ std::unique_ptr<RTDyldMemoryManager> ClientMM;
};
// About Module states: added->loaded->finalized.
@@ -206,8 +216,10 @@ class MCJIT : public ExecutionEngine {
OwningModuleContainer OwnedModules;
- typedef DenseMap<Module *, ObjectImage *> LoadedObjectMap;
- LoadedObjectMap LoadedObjects;
+ SmallVector<object::Archive*, 2> Archives;
+
+ typedef SmallVector<ObjectImage *, 2> LoadedObjectList;
+ LoadedObjectList LoadedObjects;
// An optional ObjectCache to be notified of compiled objects and used to
// perform lookup of pre-compiled code to avoid re-compilation.
@@ -226,18 +238,24 @@ public:
/// @name ExecutionEngine interface implementation
/// @{
- virtual void addModule(Module *M);
- virtual bool removeModule(Module *M);
+ void addModule(Module *M) override;
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override;
+ void addArchive(object::Archive *O) override;
+ bool removeModule(Module *M) override;
/// FindFunctionNamed - Search all of the active modules to find the one that
/// defines FnName. This is very slow operation and shouldn't be used for
/// general code.
- virtual Function *FindFunctionNamed(const char *FnName);
+ Function *FindFunctionNamed(const char *FnName) override;
/// Sets the object manager that MCJIT should use to avoid compilation.
- virtual void setObjectCache(ObjectCache *manager);
+ void setObjectCache(ObjectCache *manager) override;
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ Dyld.setProcessAllSections(ProcessAllSections);
+ }
- virtual void generateCodeForModule(Module *M);
+ void generateCodeForModule(Module *M) override;
/// finalizeObject - ensure the module is fully processed and is usable.
///
@@ -248,7 +266,7 @@ public:
/// object.
/// Is it OK to finalize a set of modules, add modules and finalize again.
// FIXME: Do we really need both of these?
- virtual void finalizeObject();
+ void finalizeObject() override;
virtual void finalizeModule(Module *);
void finalizeLoadedModules();
@@ -256,18 +274,18 @@ public:
/// the static constructors or destructors for a program.
///
/// \param isDtors - Run the destructors instead of constructors.
- void runStaticConstructorsDestructors(bool isDtors);
+ void runStaticConstructorsDestructors(bool isDtors) override;
- virtual void *getPointerToBasicBlock(BasicBlock *BB);
+ void *getPointerToBasicBlock(BasicBlock *BB) override;
- virtual void *getPointerToFunction(Function *F);
+ void *getPointerToFunction(Function *F) override;
- virtual void *recompileAndRelinkFunction(Function *F);
+ void *recompileAndRelinkFunction(Function *F) override;
- virtual void freeMachineCodeForFunction(Function *F);
+ void freeMachineCodeForFunction(Function *F) override;
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
+ GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) override;
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the dlsym function call. As such it is only
@@ -277,25 +295,27 @@ public:
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
- virtual void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) override;
/// mapSectionAddress - map a section to its target address space value.
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
- virtual void mapSectionAddress(const void *LocalAddress,
- uint64_t TargetAddress) {
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
Dyld.mapSectionAddress(LocalAddress, TargetAddress);
}
- virtual void RegisterJITEventListener(JITEventListener *L);
- virtual void UnregisterJITEventListener(JITEventListener *L);
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
// If successful, these function will implicitly finalize all loaded objects.
// To get a function address within MCJIT without causing a finalize, use
// getSymbolAddress.
- virtual uint64_t getGlobalValueAddress(const std::string &Name);
- virtual uint64_t getFunctionAddress(const std::string &Name);
+ uint64_t getGlobalValueAddress(const std::string &Name) override;
+ uint64_t getFunctionAddress(const std::string &Name) override;
+
+ TargetMachine *getTargetMachine() override { return TM; }
/// @}
/// @name (Private) Registration Interfaces
@@ -322,7 +342,7 @@ protected:
/// emitObject -- Generate a JITed object in memory from the specified module
/// Currently, MCJIT only supports a single module and the module passed to
/// this function call is expected to be the contained module. The module
- /// is passed as a parameter here to prepare for multiple module support in
+ /// is passed as a parameter here to prepare for multiple module support in
/// the future.
ObjectBufferStream* emitObject(Module *M);
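
LinkingMemoryManager, updated above, is a forwarding wrapper: it overrides the memory-manager interface, intercepts symbol lookup, and delegates everything else to the client manager it owns. A stripped-down sketch of that shape (the interface below is invented for illustration, not the RTDyldMemoryManager API):

#include <cstdint>
#include <memory>
#include <string>

struct MemoryManager {
  virtual ~MemoryManager() {}
  virtual std::uint64_t getSymbolAddress(const std::string &Name) = 0;
  virtual bool finalizeMemory(std::string *ErrMsg) = 0;
};

class ForwardingMM : public MemoryManager {
  std::unique_ptr<MemoryManager> Client; // was OwningPtr in the old code

public:
  explicit ForwardingMM(std::unique_ptr<MemoryManager> C)
      : Client(std::move(C)) {}

  std::uint64_t getSymbolAddress(const std::string &Name) override {
    // Intercept point: the real engine consults its own modules first,
    // then falls back to the client manager.
    return Client->getSymbolAddress(Name);
  }

  bool finalizeMemory(std::string *ErrMsg) override {
    return Client->finalizeMemory(ErrMsg); // plain delegation
  }
};
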
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
index cf90e77..5986084 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
@@ -71,15 +71,15 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
//
// FIXME: Initialize the Near member for each memory group to avoid
// interleaving.
- error_code ec;
+ std::error_code ec;
sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(RequiredSize,
&MemGroup.Near,
sys::Memory::MF_READ |
sys::Memory::MF_WRITE,
ec);
if (ec) {
- // FIXME: Add error propogation to the interface.
- return NULL;
+ // FIXME: Add error propagation to the interface.
+ return nullptr;
}
// Save this address as the basis for our next request
@@ -105,7 +105,7 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
{
// FIXME: Should in-progress permissions be reverted if an error occurs?
- error_code ec;
+ std::error_code ec;
// Don't allow free memory blocks to be used after setting protection flags.
CodeMem.FreeMem.clear();
@@ -143,19 +143,20 @@ bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
return false;
}
-error_code SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
- unsigned Permissions) {
+std::error_code
+SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions) {
for (int i = 0, e = MemGroup.AllocatedMem.size(); i != e; ++i) {
- error_code ec;
- ec = sys::Memory::protectMappedMemory(MemGroup.AllocatedMem[i],
- Permissions);
- if (ec) {
- return ec;
- }
+ std::error_code ec;
+ ec =
+ sys::Memory::protectMappedMemory(MemGroup.AllocatedMem[i], Permissions);
+ if (ec) {
+ return ec;
+ }
}
- return error_code::success();
+ return std::error_code();
}
void SectionMemoryManager::invalidateInstructionCache() {
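
The SectionMemoryManager.cpp hunks migrate from llvm::error_code to std::error_code, where a default-constructed value means success and the object converts to true only on failure. A short sketch of the convention (function names are illustrative):

#include <string>
#include <system_error>

std::error_code applyPermissions(bool ShouldFail) {
  if (ShouldFail)
    return std::make_error_code(std::errc::permission_denied);
  // Default construction replaces the removed error_code::success().
  return std::error_code();
}

bool finalize(std::string *ErrMsg) {
  std::error_code ec = applyPermissions(false);
  if (ec) {                      // true only when an error is present
    if (ErrMsg)
      *ErrMsg = ec.message();
    return true;                 // true == failure, as in finalizeMemory
  }
  return false;
}
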
diff --git a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
index f11df82..fd37a13 100644
--- a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -15,10 +15,8 @@
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
-#define DEBUG_TYPE "oprofile-jit-event-listener"
-#include "llvm/DebugInfo.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ExecutionEngine/OProfileWrapper.h"
@@ -34,6 +32,8 @@
using namespace llvm;
using namespace llvm::jitprofiling;
+#define DEBUG_TYPE "oprofile-jit-event-listener"
+
namespace {
class OProfileJITEventListener : public JITEventListener {
@@ -171,11 +171,8 @@ void OProfileJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) {
}
// Use symbol info to iterate functions in the object.
- error_code ec;
- for (object::symbol_iterator I = Obj.begin_symbols(),
- E = Obj.end_symbols();
- I != E && !ec;
- I.increment(ec)) {
+ for (object::symbol_iterator I = Obj.begin_symbols(), E = Obj.end_symbols();
+ I != E; ++I) {
object::SymbolRef::Type SymType;
if (I->getType(SymType)) continue;
if (SymType == object::SymbolRef::ST_Function) {
@@ -204,11 +201,8 @@ void OProfileJITEventListener::NotifyFreeingObject(const ObjectImage &Obj) {
}
// Use symbol info to iterate functions in the object.
- error_code ec;
- for (object::symbol_iterator I = Obj.begin_symbols(),
- E = Obj.end_symbols();
- I != E && !ec;
- I.increment(ec)) {
+ for (object::symbol_iterator I = Obj.begin_symbols(), E = Obj.end_symbols();
+ I != E; ++I) {
object::SymbolRef::Type SymType;
if (I->getType(SymType)) continue;
if (SymType == object::SymbolRef::ST_Function) {
@@ -229,7 +223,8 @@ void OProfileJITEventListener::NotifyFreeingObject(const ObjectImage &Obj) {
namespace llvm {
JITEventListener *JITEventListener::createOProfileJITEventListener() {
- static OwningPtr<OProfileWrapper> JITProfilingWrapper(new OProfileWrapper);
+ static std::unique_ptr<OProfileWrapper> JITProfilingWrapper(
+ new OProfileWrapper);
return new OProfileJITEventListener(*JITProfilingWrapper);
}
diff --git a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
index 61d8dc2..04edbd2 100644
--- a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
@@ -13,14 +13,13 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "oprofile-wrapper"
#include "llvm/ExecutionEngine/OProfileWrapper.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <dirent.h>
#include <fcntl.h>
@@ -29,6 +28,8 @@
#include <sys/stat.h>
#include <unistd.h>
+#define DEBUG_TYPE "oprofile-wrapper"
+
namespace {
// Global mutex to ensure a single thread initializes oprofile agent.
diff --git a/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp
index 26e1fdd..1646937 100644
--- a/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RTDyldMemoryManager.cpp
@@ -15,7 +15,6 @@
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
-
#include <cstdlib>
#ifdef __linux__
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
index 603c526..8546571 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/ManagedStatic.h"
using namespace llvm;
@@ -44,10 +45,16 @@ extern "C" {
// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
// debugger checks the version before we can set it during runtime.
- struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+ struct jit_descriptor __jit_debug_descriptor = { 1, 0, nullptr, nullptr };
// Debuggers puts a breakpoint in this function.
- LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() { }
+ LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
+ // The noinline and the asm prevent calls to this function from being
+ // optimized out.
+#if !defined(_MSC_VER)
+ asm volatile("":::"memory");
+#endif
+ }
}
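
The hunk above gives the previously empty GDB hook a body the optimizer cannot discard: noinline stops inlining, and the empty asm with a "memory" clobber is opaque to the compiler, so neither the function nor calls to it can be proven side-effect-free and removed. A standalone GCC/Clang-style sketch of the same trick (the attribute spelling is compiler-specific and the names are placeholders):

extern "C" {
// A debugger sets a breakpoint here; the call sites must survive
// optimization for the JIT-registration protocol to work.
__attribute__((noinline)) void debug_hook() {
  asm volatile("" ::: "memory"); // opaque to the optimizer
}
}

void notifyDebugger() {
  debug_hook(); // without the barrier this call could be folded away
}
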
@@ -78,12 +85,12 @@ public:
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
- void registerObject(const ObjectBuffer &Object);
+ void registerObject(const ObjectBuffer &Object) override;
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was found in ObjectBufferMap.
- bool deregisterObject(const ObjectBuffer &Object);
+ bool deregisterObject(const ObjectBuffer &Object) override;
private:
/// Deregister the debug info for the given object file from the debugger
@@ -96,16 +103,15 @@ private:
/// modify global variables.
llvm::sys::Mutex JITDebugLock;
-/// Acquire the lock and do the registration.
+/// Do the registration.
void NotifyDebugger(jit_code_entry* JITCodeEntry) {
- llvm::MutexGuard locked(JITDebugLock);
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
// Insert this entry at the head of the list.
- JITCodeEntry->prev_entry = NULL;
+ JITCodeEntry->prev_entry = nullptr;
jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
JITCodeEntry->next_entry = NextEntry;
- if (NextEntry != NULL) {
+ if (NextEntry) {
NextEntry->prev_entry = JITCodeEntry;
}
__jit_debug_descriptor.first_entry = JITCodeEntry;
@@ -115,7 +121,8 @@ void NotifyDebugger(jit_code_entry* JITCodeEntry) {
GDBJITRegistrar::~GDBJITRegistrar() {
// Free all registered object files.
- for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(), E = ObjectBufferMap.end();
+ llvm::MutexGuard locked(JITDebugLock);
+ for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(), E = ObjectBufferMap.end();
I != E; ++I) {
// Call the private method that doesn't update the map so our iterator
// doesn't break.
@@ -130,15 +137,15 @@ void GDBJITRegistrar::registerObject(const ObjectBuffer &Object) {
size_t Size = Object.getBufferSize();
assert(Buffer && "Attempt to register a null object with a debugger.");
+ llvm::MutexGuard locked(JITDebugLock);
assert(ObjectBufferMap.find(Buffer) == ObjectBufferMap.end() &&
"Second attempt to perform debug registration.");
jit_code_entry* JITCodeEntry = new jit_code_entry();
- if (JITCodeEntry == 0) {
+ if (!JITCodeEntry) {
llvm::report_fatal_error(
"Allocation failed when registering a JIT entry!\n");
- }
- else {
+ } else {
JITCodeEntry->symfile_addr = Buffer;
JITCodeEntry->symfile_size = Size;
@@ -149,6 +156,7 @@ void GDBJITRegistrar::registerObject(const ObjectBuffer &Object) {
bool GDBJITRegistrar::deregisterObject(const ObjectBuffer& Object) {
const char *Buffer = Object.getBufferStart();
+ llvm::MutexGuard locked(JITDebugLock);
RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Buffer);
if (I != ObjectBufferMap.end()) {
@@ -164,9 +172,8 @@ void GDBJITRegistrar::deregisterObjectInternal(
jit_code_entry*& JITCodeEntry = I->second.second;
- // Acquire the lock and do the unregistration.
+ // Do the unregistration.
{
- llvm::MutexGuard locked(JITDebugLock);
__jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
// Remove the jit_code_entry from the linked list.
@@ -190,25 +197,17 @@ void GDBJITRegistrar::deregisterObjectInternal(
}
delete JITCodeEntry;
- JITCodeEntry = NULL;
+ JITCodeEntry = nullptr;
}
+llvm::ManagedStatic<GDBJITRegistrar> TheRegistrar;
+
} // end namespace
namespace llvm {
JITRegistrar& JITRegistrar::getGDBRegistrar() {
- static GDBJITRegistrar* sRegistrar = NULL;
- if (sRegistrar == NULL) {
- // The mutex is here so that it won't slow down access once the registrar
- // is instantiated
- llvm::MutexGuard locked(JITDebugLock);
- // Check again to be sure another thread didn't create this while we waited
- if (sRegistrar == NULL) {
- sRegistrar = new GDBJITRegistrar;
- }
- }
- return *sRegistrar;
+ return *TheRegistrar;
}
} // namespace llvm
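
For context, the registrar above drives the standard GDB JIT debugging interface; a minimal sketch of the structures it manipulates (field names per the documented protocol, not the exact LLVM header) is:

// Illustrative sketch of the GDB JIT interface used by the registrar above.
#include <cstdint>

typedef enum {
  JIT_NOACTION = 0,
  JIT_REGISTER_FN,     // a new entry was linked into the list
  JIT_UNREGISTER_FN    // an entry is about to be removed
} jit_actions_t;

struct jit_code_entry {
  jit_code_entry *next_entry;
  jit_code_entry *prev_entry;
  const char *symfile_addr;   // in-memory object file with debug info
  uint64_t symfile_size;
};

struct jit_descriptor {
  uint32_t version;           // must be 1; the debugger checks it
  uint32_t action_flag;       // one of jit_actions_t
  jit_code_entry *relevant_entry;
  jit_code_entry *first_entry;
};

// The JIT links a new jit_code_entry at first_entry, sets action_flag to
// JIT_REGISTER_FN, and calls __jit_debug_register_code(), on which the
// debugger has placed a breakpoint; that is the sequence NotifyDebugger()
// above participates in.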
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
index 9cbde5d..c3a2182 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h
@@ -1,6 +1,6 @@
//===-- ObjectImageCommon.h - Format independent executable object image --===//
//
-// The LLVM Compiler Infrastructure
+// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -18,21 +18,27 @@
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/Object/ObjectFile.h"
+#include <memory>
+
namespace llvm {
+namespace object {
+ class ObjectFile;
+}
+
class ObjectImageCommon : public ObjectImage {
ObjectImageCommon(); // = delete
ObjectImageCommon(const ObjectImageCommon &other); // = delete
- virtual void anchor();
+ void anchor() override;
protected:
- object::ObjectFile *ObjFile;
+ std::unique_ptr<object::ObjectFile> ObjFile;
// This form of the constructor allows subclasses to use
// format-specific subclasses of ObjectFile directly
- ObjectImageCommon(ObjectBuffer *Input, object::ObjectFile *Obj)
+ ObjectImageCommon(ObjectBuffer *Input, std::unique_ptr<object::ObjectFile> Obj)
: ObjectImage(Input), // saves Input as Buffer and takes ownership
- ObjFile(Obj)
+ ObjFile(std::move(Obj))
{
}
@@ -40,40 +46,44 @@ public:
ObjectImageCommon(ObjectBuffer* Input)
: ObjectImage(Input) // saves Input as Buffer and takes ownership
{
- ObjFile = object::ObjectFile::createObjectFile(Buffer->getMemBuffer());
+ // FIXME: error checking? createObjectFile returns an ErrorOr<ObjectFile*>
+ // and should probably be checked for failure.
+ std::unique_ptr<MemoryBuffer> Buf(Buffer->getMemBuffer());
+ ObjFile.reset(object::ObjectFile::createObjectFile(Buf).get());
}
- virtual ~ObjectImageCommon() { delete ObjFile; }
+ ObjectImageCommon(std::unique_ptr<object::ObjectFile> Input)
+ : ObjectImage(nullptr), ObjFile(std::move(Input)) {}
+ virtual ~ObjectImageCommon() { }
- virtual object::symbol_iterator begin_symbols() const
- { return ObjFile->begin_symbols(); }
- virtual object::symbol_iterator end_symbols() const
- { return ObjFile->end_symbols(); }
+ object::symbol_iterator begin_symbols() const override
+ { return ObjFile->symbol_begin(); }
+ object::symbol_iterator end_symbols() const override
+ { return ObjFile->symbol_end(); }
- virtual object::section_iterator begin_sections() const
- { return ObjFile->begin_sections(); }
- virtual object::section_iterator end_sections() const
- { return ObjFile->end_sections(); }
+ object::section_iterator begin_sections() const override
+ { return ObjFile->section_begin(); }
+ object::section_iterator end_sections() const override
+ { return ObjFile->section_end(); }
- virtual /* Triple::ArchType */ unsigned getArch() const
- { return ObjFile->getArch(); }
+ /* Triple::ArchType */ unsigned getArch() const override
+ { return ObjFile->getArch(); }
- virtual StringRef getData() const { return ObjFile->getData(); }
+ StringRef getData() const override { return ObjFile->getData(); }
- virtual object::ObjectFile* getObjectFile() const { return ObjFile; }
+ object::ObjectFile* getObjectFile() const override { return ObjFile.get(); }
// Subclasses can override these methods to update the image with loaded
// addresses for sections and common symbols
- virtual void updateSectionAddress(const object::SectionRef &Sec,
- uint64_t Addr) {}
- virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr)
- {}
+ void updateSectionAddress(const object::SectionRef &Sec,
+ uint64_t Addr) override {}
+ void updateSymbolAddress(const object::SymbolRef &Sym,
+ uint64_t Addr) override {}
// Subclasses can override these methods to provide JIT debugging support
- virtual void registerWithDebugger() {}
- virtual void deregisterWithDebugger() {}
+ void registerWithDebugger() override {}
+ void deregisterWithDebugger() override {}
};
} // end namespace llvm
#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
-
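
The FIXME in the new ObjectImageCommon constructor above asks for error checking of createObjectFile's ErrorOr<ObjectFile*> result. A hedged sketch of what that check could look like (illustrative only, not part of this commit):

// Illustrative only: checking the ErrorOr<ObjectFile*> result that the FIXME
// above mentions, instead of calling .get() unconditionally.
std::unique_ptr<MemoryBuffer> Buf(Buffer->getMemBuffer());
ErrorOr<object::ObjectFile *> ObjOrErr =
    object::ObjectFile::createObjectFile(Buf);
if (std::error_code EC = ObjOrErr.getError())
  report_fatal_error("Unable to create object image: " + EC.message());
ObjFile.reset(*ObjOrErr);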
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 161135a..d86a751 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -11,21 +11,21 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dyld"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "JITRegistrar.h"
#include "ObjectImageCommon.h"
#include "RuntimeDyldELF.h"
#include "RuntimeDyldImpl.h"
#include "RuntimeDyldMachO.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Object/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/Object/ELF.h"
using namespace llvm;
using namespace llvm::object;
+#define DEBUG_TYPE "dyld"
+
// Empty out-of-line virtual destructor as the key function.
RuntimeDyldImpl::~RuntimeDyldImpl() {}
@@ -36,11 +36,9 @@ void ObjectImageCommon::anchor() {}
namespace llvm {
-void RuntimeDyldImpl::registerEHFrames() {
-}
+void RuntimeDyldImpl::registerEHFrames() {}
-void RuntimeDyldImpl::deregisterEHFrames() {
-}
+void RuntimeDyldImpl::deregisterEHFrames() {}
// Resolve the relocations for all symbols we currently know about.
void RuntimeDyldImpl::resolveRelocations() {
@@ -56,9 +54,8 @@ void RuntimeDyldImpl::resolveRelocations() {
// symbol for the relocation is located. The SectionID in the relocation
// entry provides the section to which the relocation will be applied.
uint64_t Addr = Sections[i].LoadAddress;
- DEBUG(dbgs() << "Resolving relocations Section #" << i
- << "\t" << format("%p", (uint8_t *)Addr)
- << "\n");
+ DEBUG(dbgs() << "Resolving relocations Section #" << i << "\t"
+ << format("%p", (uint8_t *)Addr) << "\n");
resolveRelocationList(Relocations[i], Addr);
Relocations.erase(i);
}
@@ -76,22 +73,52 @@ void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
llvm_unreachable("Attempting to remap address of unknown section!");
}
-// Subclasses can implement this method to create specialized image instances.
-// The caller owns the pointer that is returned.
-ObjectImage *RuntimeDyldImpl::createObjectImage(ObjectBuffer *InputBuffer) {
- return new ObjectImageCommon(InputBuffer);
+static std::error_code getOffset(const SymbolRef &Sym, uint64_t &Result) {
+ uint64_t Address;
+ if (std::error_code EC = Sym.getAddress(Address))
+ return EC;
+
+ if (Address == UnknownAddressOrSize) {
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+
+ const ObjectFile *Obj = Sym.getObject();
+ section_iterator SecI(Obj->section_begin());
+ if (std::error_code EC = Sym.getSection(SecI))
+ return EC;
+
+ if (SecI == Obj->section_end()) {
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+
+ uint64_t SectionAddress;
+ if (std::error_code EC = SecI->getAddress(SectionAddress))
+ return EC;
+
+ Result = Address - SectionAddress;
+ return object_error::success;
}
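
A quick illustration of the getOffset helper above, with hypothetical numbers:

// Hypothetical numbers: for a symbol at address 0x4010 in a section whose
// address is 0x4000, getOffset() reports Result = 0x10. Absolute symbols and
// symbols with no containing section report UnknownAddressOrSize instead.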
-ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
+ObjectImage *RuntimeDyldImpl::loadObject(ObjectImage *InputObject) {
MutexGuard locked(lock);
- OwningPtr<ObjectImage> obj(createObjectImage(InputBuffer));
- if (!obj)
- report_fatal_error("Unable to create object image from memory buffer!");
+ std::unique_ptr<ObjectImage> Obj(InputObject);
+ if (!Obj)
+ return nullptr;
// Save information about our target
- Arch = (Triple::ArchType)obj->getArch();
- IsTargetLittleEndian = obj->getObjectFile()->isLittleEndian();
+ Arch = (Triple::ArchType)Obj->getArch();
+ IsTargetLittleEndian = Obj->getObjectFile()->isLittleEndian();
+
+ // Compute the memory size required to load all sections to be loaded
+ // and pass this information to the memory manager
+ if (MemMgr->needsToReserveAllocationSpace()) {
+ uint64_t CodeSize = 0, DataSizeRO = 0, DataSizeRW = 0;
+ computeTotalAllocSize(*Obj, CodeSize, DataSizeRO, DataSizeRW);
+ MemMgr->reserveAllocationSpace(CodeSize, DataSizeRO, DataSizeRW);
+ }
// Symbols found in this object
StringMap<SymbolLoc> LocalSymbols;
@@ -103,52 +130,47 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
// Maximum required total memory to allocate all common symbols
uint64_t CommonSize = 0;
- error_code err;
// Parse symbols
DEBUG(dbgs() << "Parse symbols:\n");
- for (symbol_iterator i = obj->begin_symbols(), e = obj->end_symbols();
- i != e; i.increment(err)) {
- Check(err);
+ for (symbol_iterator I = Obj->begin_symbols(), E = Obj->end_symbols(); I != E;
+ ++I) {
object::SymbolRef::Type SymType;
StringRef Name;
- Check(i->getType(SymType));
- Check(i->getName(Name));
+ Check(I->getType(SymType));
+ Check(I->getName(Name));
- uint32_t flags;
- Check(i->getFlags(flags));
+ uint32_t Flags = I->getFlags();
- bool isCommon = flags & SymbolRef::SF_Common;
- if (isCommon) {
+ bool IsCommon = Flags & SymbolRef::SF_Common;
+ if (IsCommon) {
// Add the common symbols to a list. We'll allocate them all below.
- uint32_t Align;
- Check(i->getAlignment(Align));
- uint64_t Size = 0;
- Check(i->getSize(Size));
- CommonSize += Size + Align;
- CommonSymbols[*i] = CommonSymbolInfo(Size, Align);
+ if (!GlobalSymbolTable.count(Name)) {
+ uint32_t Align;
+ Check(I->getAlignment(Align));
+ uint64_t Size = 0;
+ Check(I->getSize(Size));
+ CommonSize += Size + Align;
+ CommonSymbols[*I] = CommonSymbolInfo(Size, Align);
+ }
} else {
if (SymType == object::SymbolRef::ST_Function ||
SymType == object::SymbolRef::ST_Data ||
SymType == object::SymbolRef::ST_Unknown) {
- uint64_t FileOffset;
+ uint64_t SectOffset;
StringRef SectionData;
bool IsCode;
- section_iterator si = obj->end_sections();
- Check(i->getFileOffset(FileOffset));
- Check(i->getSection(si));
- if (si == obj->end_sections()) continue;
- Check(si->getContents(SectionData));
- Check(si->isText(IsCode));
- const uint8_t* SymPtr = (const uint8_t*)InputBuffer->getBufferStart() +
- (uintptr_t)FileOffset;
- uintptr_t SectOffset = (uintptr_t)(SymPtr -
- (const uint8_t*)SectionData.begin());
- unsigned SectionID = findOrEmitSection(*obj, *si, IsCode, LocalSections);
+ section_iterator SI = Obj->end_sections();
+ Check(getOffset(*I, SectOffset));
+ Check(I->getSection(SI));
+ if (SI == Obj->end_sections())
+ continue;
+ Check(SI->getContents(SectionData));
+ Check(SI->isText(IsCode));
+ unsigned SectionID =
+ findOrEmitSection(*Obj, *SI, IsCode, LocalSections);
LocalSymbols[Name.data()] = SymbolLoc(SectionID, SectOffset);
- DEBUG(dbgs() << "\tFileOffset: " << format("%p", (uintptr_t)FileOffset)
- << " flags: " << flags
- << " SID: " << SectionID
- << " Offset: " << format("%p", SectOffset));
+ DEBUG(dbgs() << "\tOffset: " << format("%p", (uintptr_t)SectOffset)
+ << " flags: " << Flags << " SID: " << SectionID);
GlobalSymbolTable[Name] = SymbolLoc(SectionID, SectOffset);
}
}
@@ -157,39 +179,179 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
// Allocate common symbols
if (CommonSize != 0)
- emitCommonSymbols(*obj, CommonSymbols, CommonSize, LocalSymbols);
+ emitCommonSymbols(*Obj, CommonSymbols, CommonSize, GlobalSymbolTable);
// Parse and process relocations
DEBUG(dbgs() << "Parse relocations:\n");
- for (section_iterator si = obj->begin_sections(),
- se = obj->end_sections(); si != se; si.increment(err)) {
- Check(err);
- bool isFirstRelocation = true;
+ for (section_iterator SI = Obj->begin_sections(), SE = Obj->end_sections();
+ SI != SE; ++SI) {
unsigned SectionID = 0;
StubMap Stubs;
- section_iterator RelocatedSection = si->getRelocatedSection();
-
- for (relocation_iterator i = si->begin_relocations(),
- e = si->end_relocations(); i != e; i.increment(err)) {
- Check(err);
-
- // If it's the first relocation in this section, find its SectionID
- if (isFirstRelocation) {
- SectionID =
- findOrEmitSection(*obj, *RelocatedSection, true, LocalSections);
- DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
- isFirstRelocation = false;
+ section_iterator RelocatedSection = SI->getRelocatedSection();
+
+ relocation_iterator I = SI->relocation_begin();
+ relocation_iterator E = SI->relocation_end();
+
+ if (I == E && !ProcessAllSections)
+ continue;
+
+ bool IsCode = false;
+ Check(RelocatedSection->isText(IsCode));
+ SectionID =
+ findOrEmitSection(*Obj, *RelocatedSection, IsCode, LocalSections);
+ DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+
+ for (; I != E;)
+ I = processRelocationRef(SectionID, I, *Obj, LocalSections, LocalSymbols,
+ Stubs);
+ }
+
+ // Give the subclasses a chance to tie-up any loose ends.
+ finalizeLoad(*Obj, LocalSections);
+
+ return Obj.release();
+}
+
+// A helper method for computeTotalAllocSize.
+// Computes the memory size required to allocate sections with the given sizes,
+// assuming that all sections are allocated with the given alignment
+static uint64_t
+computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
+ uint64_t Alignment) {
+ uint64_t TotalSize = 0;
+ for (size_t Idx = 0, Cnt = SectionSizes.size(); Idx < Cnt; Idx++) {
+ uint64_t AlignedSize =
+ (SectionSizes[Idx] + Alignment - 1) / Alignment * Alignment;
+ TotalSize += AlignedSize;
+ }
+ return TotalSize;
+}
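
A quick worked example of the rounding performed above, with hypothetical numbers:

// Hypothetical numbers: with Alignment = 16 and SectionSizes = {100, 20, 1},
// each size is rounded up to a multiple of 16 (112, 32 and 16 respectively),
// so computeAllocationSizeForSections() returns 160.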
+
+// Compute an upper bound of the memory size that is required to load all
+// sections
+void RuntimeDyldImpl::computeTotalAllocSize(ObjectImage &Obj,
+ uint64_t &CodeSize,
+ uint64_t &DataSizeRO,
+ uint64_t &DataSizeRW) {
+ // Compute the size of all sections required for execution
+ std::vector<uint64_t> CodeSectionSizes;
+ std::vector<uint64_t> ROSectionSizes;
+ std::vector<uint64_t> RWSectionSizes;
+ uint64_t MaxAlignment = sizeof(void *);
+
+ // Collect sizes of all sections to be loaded;
+ // also determine the max alignment of all sections
+ for (section_iterator SI = Obj.begin_sections(), SE = Obj.end_sections();
+ SI != SE; ++SI) {
+ const SectionRef &Section = *SI;
+
+ bool IsRequired;
+ Check(Section.isRequiredForExecution(IsRequired));
+
+ // Consider only the sections that are required to be loaded for execution
+ if (IsRequired) {
+ uint64_t DataSize = 0;
+ uint64_t Alignment64 = 0;
+ bool IsCode = false;
+ bool IsReadOnly = false;
+ StringRef Name;
+ Check(Section.getSize(DataSize));
+ Check(Section.getAlignment(Alignment64));
+ Check(Section.isText(IsCode));
+ Check(Section.isReadOnlyData(IsReadOnly));
+ Check(Section.getName(Name));
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+
+ uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
+ uint64_t SectionSize = DataSize + StubBufSize;
+
+      // The .eh_frame section (at least on Linux) needs an extra four bytes
+      // of zero padding at the end. For MachO objects, this section has a
+      // slightly different name, so this won't have any effect for MachO
+      // objects.
+ if (Name == ".eh_frame")
+ SectionSize += 4;
+
+ if (SectionSize > 0) {
+ // save the total size of the section
+ if (IsCode) {
+ CodeSectionSizes.push_back(SectionSize);
+ } else if (IsReadOnly) {
+ ROSectionSizes.push_back(SectionSize);
+ } else {
+ RWSectionSizes.push_back(SectionSize);
+ }
+ // update the max alignment
+ if (Alignment > MaxAlignment) {
+ MaxAlignment = Alignment;
+ }
}
+ }
+ }
- processRelocationRef(SectionID, *i, *obj, LocalSections, LocalSymbols,
- Stubs);
+ // Compute the size of all common symbols
+ uint64_t CommonSize = 0;
+ for (symbol_iterator I = Obj.begin_symbols(), E = Obj.end_symbols(); I != E;
+ ++I) {
+ uint32_t Flags = I->getFlags();
+ if (Flags & SymbolRef::SF_Common) {
+ // Add the common symbols to a list. We'll allocate them all below.
+ uint64_t Size = 0;
+ Check(I->getSize(Size));
+ CommonSize += Size;
}
}
+ if (CommonSize != 0) {
+ RWSectionSizes.push_back(CommonSize);
+ }
- // Give the subclasses a chance to tie-up any loose ends.
- finalizeLoad(LocalSections);
+  // Compute the required allocation space for each kind of section (code,
+  // read-only data, read-write data), assuming that all sections are
+  // allocated with the max alignment. Note that we cannot compute with the
+  // individual alignments of the sections, because then the required size
+  // would depend on the order in which the sections are allocated.
+ CodeSize = computeAllocationSizeForSections(CodeSectionSizes, MaxAlignment);
+ DataSizeRO = computeAllocationSizeForSections(ROSectionSizes, MaxAlignment);
+ DataSizeRW = computeAllocationSizeForSections(RWSectionSizes, MaxAlignment);
+}
- return obj.take();
+// Compute the stub buffer size for the given section.
+unsigned RuntimeDyldImpl::computeSectionStubBufSize(ObjectImage &Obj,
+ const SectionRef &Section) {
+ unsigned StubSize = getMaxStubSize();
+ if (StubSize == 0) {
+ return 0;
+ }
+  // FIXME: this is an inefficient way to handle this. We should compute the
+ // necessary section allocation size in loadObject by walking all the sections
+ // once.
+ unsigned StubBufSize = 0;
+ for (section_iterator SI = Obj.begin_sections(), SE = Obj.end_sections();
+ SI != SE; ++SI) {
+ section_iterator RelSecI = SI->getRelocatedSection();
+ if (!(RelSecI == Section))
+ continue;
+
+ for (const RelocationRef &Reloc : SI->relocations()) {
+ (void)Reloc;
+ StubBufSize += StubSize;
+ }
+ }
+
+ // Get section data size and alignment
+ uint64_t Alignment64;
+ uint64_t DataSize;
+ Check(Section.getSize(DataSize));
+ Check(Section.getAlignment(Alignment64));
+
+  // Add alignment padding for the stub buffer at the end of the section.
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+ unsigned StubAlignment = getStubAlignment();
+ unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
+ if (StubAlignment > EndAlignment)
+ StubBufSize += StubAlignment - EndAlignment;
+ return StubBufSize;
}
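
The EndAlignment expression above isolates the lowest set bit of (DataSize | Alignment), i.e. the alignment guaranteed at the end of the section data; a hypothetical example:

// Hypothetical numbers: DataSize = 0x2C and Alignment = 0x10 give
// (DataSize | Alignment) = 0x3C, whose lowest set bit is 4, so the end of the
// section data is only guaranteed 4-byte alignment. With an 8-byte stub
// alignment, 8 - 4 = 4 bytes of padding are added before the stubs.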
void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
@@ -198,22 +360,20 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
SymbolTableMap &SymbolTable) {
// Allocate memory for the section
unsigned SectionID = Sections.size();
- uint8_t *Addr = MemMgr->allocateDataSection(
- TotalSize, sizeof(void*), SectionID, StringRef(), false);
+ uint8_t *Addr = MemMgr->allocateDataSection(TotalSize, sizeof(void *),
+ SectionID, StringRef(), false);
if (!Addr)
report_fatal_error("Unable to allocate memory for common symbols!");
uint64_t Offset = 0;
Sections.push_back(SectionEntry(StringRef(), Addr, TotalSize, 0));
memset(Addr, 0, TotalSize);
- DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
- << " new addr: " << format("%p", Addr)
- << " DataSize: " << TotalSize
- << "\n");
+ DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID << " new addr: "
+ << format("%p", Addr) << " DataSize: " << TotalSize << "\n");
// Assign the address of each symbol
for (CommonSymbolMap::const_iterator it = CommonSymbols.begin(),
- itEnd = CommonSymbols.end(); it != itEnd; it++) {
+ itEnd = CommonSymbols.end(); it != itEnd; ++it) {
uint64_t Size = it->second.first;
uint64_t Align = it->second.second;
StringRef Name;
@@ -223,8 +383,8 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
uint64_t AlignOffset = OffsetToAlignment((uint64_t)Addr, Align);
Addr += AlignOffset;
Offset += AlignOffset;
- DEBUG(dbgs() << "Allocating common symbol " << Name << " address " <<
- format("%p\n", Addr));
+ DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+ << format("%p\n", Addr));
}
Obj.updateSymbolAddress(it->first, (uint64_t)Addr);
SymbolTable[Name.data()] = SymbolLoc(SectionID, Offset);
@@ -234,30 +394,7 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
}
unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
- const SectionRef &Section,
- bool IsCode) {
-
- unsigned StubBufSize = 0,
- StubSize = getMaxStubSize();
- error_code err;
- const ObjectFile *ObjFile = Obj.getObjectFile();
- // FIXME: this is an inefficient way to handle this. We should computed the
- // necessary section allocation size in loadObject by walking all the sections
- // once.
- if (StubSize > 0) {
- for (section_iterator SI = ObjFile->begin_sections(),
- SE = ObjFile->end_sections();
- SI != SE; SI.increment(err), Check(err)) {
- section_iterator RelSecI = SI->getRelocatedSection();
- if (!(RelSecI == Section))
- continue;
-
- for (relocation_iterator I = SI->begin_relocations(),
- E = SI->end_relocations(); I != E; I.increment(err), Check(err)) {
- StubBufSize += StubSize;
- }
- }
- }
+ const SectionRef &Section, bool IsCode) {
StringRef data;
uint64_t Alignment64;
@@ -271,6 +408,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
bool IsReadOnly;
uint64_t DataSize;
unsigned PaddingSize = 0;
+ unsigned StubBufSize = 0;
StringRef Name;
Check(Section.isRequiredForExecution(IsRequired));
Check(Section.isVirtual(IsVirtual));
@@ -278,12 +416,8 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
Check(Section.isReadOnlyData(IsReadOnly));
Check(Section.getSize(DataSize));
Check(Section.getName(Name));
- if (StubSize > 0) {
- unsigned StubAlignment = getStubAlignment();
- unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
- if (StubAlignment > EndAlignment)
- StubBufSize += StubAlignment - EndAlignment;
- }
+
+ StubBufSize = computeSectionStubBufSize(Obj, Section);
// The .eh_frame section (at least on Linux) needs an extra four bytes padded
// with zeroes added at the end. For MachO objects, this section has a
@@ -291,19 +425,19 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
if (Name == ".eh_frame")
PaddingSize = 4;
- unsigned Allocate;
+ uintptr_t Allocate;
unsigned SectionID = Sections.size();
uint8_t *Addr;
- const char *pData = 0;
+ const char *pData = nullptr;
// Some sections, such as debug info, don't need to be loaded for execution.
// Leave those where they are.
if (IsRequired) {
Allocate = DataSize + PaddingSize + StubBufSize;
- Addr = IsCode
- ? MemMgr->allocateCodeSection(Allocate, Alignment, SectionID, Name)
- : MemMgr->allocateDataSection(Allocate, Alignment, SectionID, Name,
- IsReadOnly);
+ Addr = IsCode ? MemMgr->allocateCodeSection(Allocate, Alignment, SectionID,
+ Name)
+ : MemMgr->allocateDataSection(Allocate, Alignment, SectionID,
+ Name, IsReadOnly);
if (!Addr)
report_fatal_error("Unable to allocate section memory!");
@@ -324,30 +458,22 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
DataSize += PaddingSize;
}
- DEBUG(dbgs() << "emitSection SectionID: " << SectionID
- << " Name: " << Name
+ DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
<< " obj addr: " << format("%p", pData)
<< " new addr: " << format("%p", Addr)
- << " DataSize: " << DataSize
- << " StubBufSize: " << StubBufSize
- << " Allocate: " << Allocate
- << "\n");
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
Obj.updateSectionAddress(Section, (uint64_t)Addr);
- }
- else {
+ } else {
// Even if we didn't load the section, we need to record an entry for it
// to handle later processing (and by 'handle' I mean don't do anything
// with these sections).
Allocate = 0;
- Addr = 0;
- DEBUG(dbgs() << "emitSection SectionID: " << SectionID
- << " Name: " << Name
- << " obj addr: " << format("%p", data.data())
- << " new addr: 0"
- << " DataSize: " << DataSize
- << " StubBufSize: " << StubBufSize
- << " Allocate: " << Allocate
- << "\n");
+ Addr = nullptr;
+ DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+ << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+ << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate << "\n");
}
Sections.push_back(SectionEntry(Name, Addr, DataSize, (uintptr_t)pData));
@@ -380,8 +506,7 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
// Relocation by symbol. If the symbol is found in the global symbol table,
// create an appropriate section relocation. Otherwise, add it to
// ExternalSymbolRelocations.
- SymbolTableMap::const_iterator Loc =
- GlobalSymbolTable.find(SymbolName);
+ SymbolTableMap::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
if (Loc == GlobalSymbolTable.end()) {
ExternalSymbolRelocations[SymbolName].push_back(RE);
} else {
@@ -392,12 +517,14 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
}
}
-uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
- if (Arch == Triple::aarch64) {
+uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
+ unsigned AbiVariant) {
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+ Arch == Triple::arm64 || Arch == Triple::arm64_be) {
// This stub has to be able to access the full address space,
// since symbol lookup won't necessarily find a handy, in-range,
// PLT stub for functions which could be anywhere.
- uint32_t *StubAddr = (uint32_t*)Addr;
+ uint32_t *StubAddr = (uint32_t *)Addr;
// Stub can use ip0 (== x16) to calculate address
*StubAddr = 0xd2e00010; // movz ip0, #:abs_g3:<addr>
@@ -411,14 +538,14 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
*StubAddr = 0xd61f0200; // br ip0
return Addr;
- } else if (Arch == Triple::arm) {
+ } else if (Arch == Triple::arm || Arch == Triple::armeb) {
// TODO: There is only ARM far stub now. We should add the Thumb stub,
// and stubs for branches Thumb - ARM and ARM - Thumb.
- uint32_t *StubAddr = (uint32_t*)Addr;
+ uint32_t *StubAddr = (uint32_t *)Addr;
*StubAddr = 0xe51ff004; // ldr pc,<label>
- return (uint8_t*)++StubAddr;
+ return (uint8_t *)++StubAddr;
} else if (Arch == Triple::mipsel || Arch == Triple::mips) {
- uint32_t *StubAddr = (uint32_t*)Addr;
+ uint32_t *StubAddr = (uint32_t *)Addr;
// 0: 3c190000 lui t9,%hi(addr).
// 4: 27390000 addiu t9,t9,%lo(addr).
// 8: 03200008 jr t9.
@@ -435,22 +562,31 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
*StubAddr = NopInstr;
return Addr;
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
- // PowerPC64 stub: the address points to a function descriptor
- // instead of the function itself. Load the function address
- // on r11 and sets it to control register. Also loads the function
- // TOC in r2 and environment pointer to r11.
+ // Depending on which version of the ELF ABI is in use, we need to
+ // generate one of two variants of the stub. They both start with
+ // the same sequence to load the target address into r12.
writeInt32BE(Addr, 0x3D800000); // lis r12, highest(addr)
writeInt32BE(Addr+4, 0x618C0000); // ori r12, higher(addr)
writeInt32BE(Addr+8, 0x798C07C6); // sldi r12, r12, 32
writeInt32BE(Addr+12, 0x658C0000); // oris r12, r12, h(addr)
writeInt32BE(Addr+16, 0x618C0000); // ori r12, r12, l(addr)
- writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
- writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
- writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 0(r12)
- writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
- writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r2)
- writeInt32BE(Addr+40, 0x4E800420); // bctr
-
+ if (AbiVariant == 2) {
+ // PowerPC64 stub ELFv2 ABI: The address points to the function itself.
+ // The address is already in r12 as required by the ABI. Branch to it.
+ writeInt32BE(Addr+20, 0xF8410018); // std r2, 24(r1)
+ writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
+ writeInt32BE(Addr+28, 0x4E800420); // bctr
+ } else {
+      // PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
+      // Load the function address into r11 and move it to the count register,
+      // load the function's TOC pointer into r2, and load the environment
+      // pointer into r11.
+ writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
+ writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
+      writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 8(r12)
+ writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
+      writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r12)
+ writeInt32BE(Addr+40, 0x4E800420); // bctr
+ }
return Addr;
} else if (Arch == Triple::systemz) {
writeInt16BE(Addr, 0xC418); // lgrl %r1,.+8
@@ -463,6 +599,8 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
*Addr = 0xFF; // jmp
*(Addr+1) = 0x25; // rip
// 32-bit PC-relative address of the GOT entry will be stored at Addr+2
+ } else if (Arch == Triple::x86) {
+ *Addr = 0xE9; // 32-bit pc-relative jump.
}
return Addr;
}
@@ -489,36 +627,37 @@ void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
const RelocationEntry &RE = Relocs[i];
// Ignore relocations for sections that were not loaded
- if (Sections[RE.SectionID].Address == 0)
+ if (Sections[RE.SectionID].Address == nullptr)
continue;
resolveRelocation(RE, Value);
}
}
void RuntimeDyldImpl::resolveExternalSymbols() {
- while(!ExternalSymbolRelocations.empty()) {
+ while (!ExternalSymbolRelocations.empty()) {
StringMap<RelocationList>::iterator i = ExternalSymbolRelocations.begin();
StringRef Name = i->first();
if (Name.size() == 0) {
// This is an absolute symbol, use an address of zero.
- DEBUG(dbgs() << "Resolving absolute relocations." << "\n");
+ DEBUG(dbgs() << "Resolving absolute relocations."
+ << "\n");
RelocationList &Relocs = i->second;
resolveRelocationList(Relocs, 0);
} else {
uint64_t Addr = 0;
SymbolTableMap::const_iterator Loc = GlobalSymbolTable.find(Name);
if (Loc == GlobalSymbolTable.end()) {
- // This is an external symbol, try to get its address from
- // MemoryManager.
- Addr = MemMgr->getSymbolAddress(Name.data());
- // The call to getSymbolAddress may have caused additional modules to
- // be loaded, which may have added new entries to the
-      // ExternalSymbolRelocations map. Consequently, we need to update our
- // iterator. This is also why retrieval of the relocation list
- // associated with this symbol is deferred until below this point.
- // New entries may have been added to the relocation list.
- i = ExternalSymbolRelocations.find(Name);
+ // This is an external symbol, try to get its address from
+ // MemoryManager.
+ Addr = MemMgr->getSymbolAddress(Name.data());
+ // The call to getSymbolAddress may have caused additional modules to
+ // be loaded, which may have added new entries to the
+        // ExternalSymbolRelocations map. Consequently, we need to update our
+ // iterator. This is also why retrieval of the relocation list
+ // associated with this symbol is deferred until below this point.
+ // New entries may have been added to the relocation list.
+ i = ExternalSymbolRelocations.find(Name);
} else {
// We found the symbol in our global table. It was probably in a
// Module that we loaded previously.
@@ -529,12 +668,11 @@ void RuntimeDyldImpl::resolveExternalSymbols() {
// FIXME: Implement error handling that doesn't kill the host program!
if (!Addr)
report_fatal_error("Program used external function '" + Name +
- "' which could not be resolved!");
+ "' which could not be resolved!");
updateGOTEntries(Name, Addr);
- DEBUG(dbgs() << "Resolving relocations Name: " << Name
- << "\t" << format("0x%lx", Addr)
- << "\n");
+ DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+ << format("0x%lx", Addr) << "\n");
// This list may have been updated when we called getSymbolAddress, so
// don't change this code to get the list earlier.
RelocationList &Relocs = i->second;
@@ -545,7 +683,6 @@ void RuntimeDyldImpl::resolveExternalSymbols() {
}
}
-
//===----------------------------------------------------------------------===//
// RuntimeDyld class implementation
RuntimeDyld::RuntimeDyld(RTDyldMemoryManager *mm) {
@@ -555,58 +692,103 @@ RuntimeDyld::RuntimeDyld(RTDyldMemoryManager *mm) {
// though the public class spawns a new 'impl' instance for each load,
// they share a single memory manager. This can become a problem when page
// permissions are applied.
- Dyld = 0;
+ Dyld = nullptr;
MM = mm;
+ ProcessAllSections = false;
+}
+
+RuntimeDyld::~RuntimeDyld() { delete Dyld; }
+
+static std::unique_ptr<RuntimeDyldELF>
+createRuntimeDyldELF(RTDyldMemoryManager *MM, bool ProcessAllSections) {
+ std::unique_ptr<RuntimeDyldELF> Dyld(new RuntimeDyldELF(MM));
+ Dyld->setProcessAllSections(ProcessAllSections);
+ return Dyld;
}
-RuntimeDyld::~RuntimeDyld() {
- delete Dyld;
+static std::unique_ptr<RuntimeDyldMachO>
+createRuntimeDyldMachO(Triple::ArchType Arch, RTDyldMemoryManager *MM,
+ bool ProcessAllSections) {
+ std::unique_ptr<RuntimeDyldMachO> Dyld(RuntimeDyldMachO::create(Arch, MM));
+ Dyld->setProcessAllSections(ProcessAllSections);
+ return Dyld;
+}
+
+ObjectImage *RuntimeDyld::loadObject(std::unique_ptr<ObjectFile> InputObject) {
+ std::unique_ptr<ObjectImage> InputImage;
+
+ ObjectFile &Obj = *InputObject;
+
+ if (InputObject->isELF()) {
+ InputImage.reset(RuntimeDyldELF::createObjectImageFromFile(std::move(InputObject)));
+ if (!Dyld)
+ Dyld = createRuntimeDyldELF(MM, ProcessAllSections).release();
+ } else if (InputObject->isMachO()) {
+ InputImage.reset(RuntimeDyldMachO::createObjectImageFromFile(std::move(InputObject)));
+ if (!Dyld)
+ Dyld = createRuntimeDyldMachO(
+ static_cast<Triple::ArchType>(InputImage->getArch()),
+ MM, ProcessAllSections).release();
+ } else
+ report_fatal_error("Incompatible object format!");
+
+ if (!Dyld->isCompatibleFile(&Obj))
+ report_fatal_error("Incompatible object format!");
+
+ Dyld->loadObject(InputImage.get());
+ return InputImage.release();
}
ObjectImage *RuntimeDyld::loadObject(ObjectBuffer *InputBuffer) {
- if (!Dyld) {
- sys::fs::file_magic Type =
- sys::fs::identify_magic(InputBuffer->getBuffer());
- switch (Type) {
- case sys::fs::file_magic::elf_relocatable:
- case sys::fs::file_magic::elf_executable:
- case sys::fs::file_magic::elf_shared_object:
- case sys::fs::file_magic::elf_core:
- Dyld = new RuntimeDyldELF(MM);
- break;
- case sys::fs::file_magic::macho_object:
- case sys::fs::file_magic::macho_executable:
- case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib:
- case sys::fs::file_magic::macho_core:
- case sys::fs::file_magic::macho_preload_executable:
- case sys::fs::file_magic::macho_dynamically_linked_shared_lib:
- case sys::fs::file_magic::macho_dynamic_linker:
- case sys::fs::file_magic::macho_bundle:
- case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub:
- case sys::fs::file_magic::macho_dsym_companion:
- Dyld = new RuntimeDyldMachO(MM);
- break;
- case sys::fs::file_magic::unknown:
- case sys::fs::file_magic::bitcode:
- case sys::fs::file_magic::archive:
- case sys::fs::file_magic::coff_object:
- case sys::fs::file_magic::coff_import_library:
- case sys::fs::file_magic::pecoff_executable:
- case sys::fs::file_magic::macho_universal_binary:
- case sys::fs::file_magic::windows_resource:
- report_fatal_error("Incompatible object format!");
- }
- } else {
- if (!Dyld->isCompatibleFormat(InputBuffer))
- report_fatal_error("Incompatible object format!");
+ std::unique_ptr<ObjectImage> InputImage;
+ sys::fs::file_magic Type = sys::fs::identify_magic(InputBuffer->getBuffer());
+
+ switch (Type) {
+ case sys::fs::file_magic::elf_relocatable:
+ case sys::fs::file_magic::elf_executable:
+ case sys::fs::file_magic::elf_shared_object:
+ case sys::fs::file_magic::elf_core:
+ InputImage.reset(RuntimeDyldELF::createObjectImage(InputBuffer));
+ if (!Dyld)
+ Dyld = createRuntimeDyldELF(MM, ProcessAllSections).release();
+ break;
+ case sys::fs::file_magic::macho_object:
+ case sys::fs::file_magic::macho_executable:
+ case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib:
+ case sys::fs::file_magic::macho_core:
+ case sys::fs::file_magic::macho_preload_executable:
+ case sys::fs::file_magic::macho_dynamically_linked_shared_lib:
+ case sys::fs::file_magic::macho_dynamic_linker:
+ case sys::fs::file_magic::macho_bundle:
+ case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub:
+ case sys::fs::file_magic::macho_dsym_companion:
+ InputImage.reset(RuntimeDyldMachO::createObjectImage(InputBuffer));
+ if (!Dyld)
+ Dyld = createRuntimeDyldMachO(
+ static_cast<Triple::ArchType>(InputImage->getArch()),
+ MM, ProcessAllSections).release();
+ break;
+ case sys::fs::file_magic::unknown:
+ case sys::fs::file_magic::bitcode:
+ case sys::fs::file_magic::archive:
+ case sys::fs::file_magic::coff_object:
+ case sys::fs::file_magic::coff_import_library:
+ case sys::fs::file_magic::pecoff_executable:
+ case sys::fs::file_magic::macho_universal_binary:
+ case sys::fs::file_magic::windows_resource:
+ report_fatal_error("Incompatible object format!");
}
- return Dyld->loadObject(InputBuffer);
+ if (!Dyld->isCompatibleFormat(InputBuffer))
+ report_fatal_error("Incompatible object format!");
+
+ Dyld->loadObject(InputImage.get());
+ return InputImage.release();
}
void *RuntimeDyld::getSymbolAddress(StringRef Name) {
if (!Dyld)
- return NULL;
+ return nullptr;
return Dyld->getSymbolAddress(Name);
}
@@ -616,12 +798,9 @@ uint64_t RuntimeDyld::getSymbolLoadAddress(StringRef Name) {
return Dyld->getSymbolLoadAddress(Name);
}
-void RuntimeDyld::resolveRelocations() {
- Dyld->resolveRelocations();
-}
+void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
-void RuntimeDyld::reassignSectionAddress(unsigned SectionID,
- uint64_t Addr) {
+void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
Dyld->reassignSectionAddress(SectionID, Addr);
}
@@ -630,9 +809,9 @@ void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
Dyld->mapSectionAddress(LocalAddress, TargetAddress);
}
-StringRef RuntimeDyld::getErrorString() {
- return Dyld->getErrorString();
-}
+bool RuntimeDyld::hasError() { return Dyld->hasError(); }
+
+StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
void RuntimeDyld::registerEHFrames() {
if (Dyld)
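
To show how the reworked public API above fits together, here is a hedged usage sketch; only the RuntimeDyld calls come from the interfaces in this diff, everything else is assumed for illustration:

// Illustrative client code exercising the new std::unique_ptr<ObjectFile>
// overload of loadObject(); names outside RuntimeDyld are assumptions.
static std::unique_ptr<ObjectImage>
linkAndResolve(RTDyldMemoryManager &MemMgr,
               std::unique_ptr<object::ObjectFile> Obj) {
  RuntimeDyld Dyld(&MemMgr);
  std::unique_ptr<ObjectImage> Image(Dyld.loadObject(std::move(Obj)));
  if (Dyld.hasError())
    report_fatal_error(Dyld.getErrorString());
  Dyld.resolveRelocations();
  Dyld.registerEHFrames();
  return Image;
}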
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
new file mode 100644
index 0000000..1e63d92
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -0,0 +1,656 @@
+//===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/StringRefMemoryObject.h"
+#include "RuntimeDyldImpl.h"
+#include <cctype>
+#include <memory>
+
+#define DEBUG_TYPE "rtdyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+ // Helper class that implements the language evaluated by RuntimeDyldChecker.
+ class RuntimeDyldCheckerExprEval {
+ public:
+
+ RuntimeDyldCheckerExprEval(const RuntimeDyldChecker &Checker,
+ llvm::raw_ostream &ErrStream)
+ : Checker(Checker), ErrStream(ErrStream) {}
+
+ bool evaluate(StringRef Expr) const {
+ // Expect equality expression of the form 'LHS = RHS'.
+ Expr = Expr.trim();
+ size_t EQIdx = Expr.find('=');
+
+ // Evaluate LHS.
+ StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
+ StringRef RemainingExpr;
+ EvalResult LHSResult;
+ std::tie(LHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(LHSExpr));
+ if (LHSResult.hasError())
+ return handleError(Expr, LHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
+
+ // Evaluate RHS.
+ StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RHSExpr));
+ if (RHSResult.hasError())
+ return handleError(Expr, RHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
+
+ if (LHSResult.getValue() != RHSResult.getValue()) {
+ ErrStream << "Expression '" << Expr << "' is false: "
+ << format("0x%lx", LHSResult.getValue()) << " != "
+ << format("0x%lx", RHSResult.getValue()) << "\n";
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ const RuntimeDyldChecker &Checker;
+ llvm::raw_ostream &ErrStream;
+
+ enum class BinOpToken : unsigned { Invalid, Add, Sub, BitwiseAnd,
+ BitwiseOr, ShiftLeft, ShiftRight };
+
+ class EvalResult {
+ public:
+ EvalResult()
+ : Value(0), ErrorMsg("") {}
+ EvalResult(uint64_t Value)
+ : Value(Value), ErrorMsg("") {}
+ EvalResult(std::string ErrorMsg)
+ : Value(0), ErrorMsg(ErrorMsg) {}
+ uint64_t getValue() const { return Value; }
+ bool hasError() const { return ErrorMsg != ""; }
+ const std::string& getErrorMsg() const { return ErrorMsg; }
+ private:
+ uint64_t Value;
+ std::string ErrorMsg;
+ };
+
+ StringRef getTokenForError(StringRef Expr) const {
+ if (Expr.empty())
+ return "";
+
+ StringRef Token, Remaining;
+ if (isalpha(Expr[0]))
+ std::tie(Token, Remaining) = parseSymbol(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(Token, Remaining) = parseNumberString(Expr);
+ else {
+ unsigned TokLen = 1;
+ if (Expr.startswith("<<") || Expr.startswith(">>"))
+ TokLen = 2;
+ Token = Expr.substr(0, TokLen);
+ }
+ return Token;
+ }
+
+ EvalResult unexpectedToken(StringRef TokenStart,
+ StringRef SubExpr,
+ StringRef ErrText) const {
+ std::string ErrorMsg("Encountered unexpected token '");
+ ErrorMsg += getTokenForError(TokenStart);
+ if (SubExpr != "") {
+ ErrorMsg += "' while parsing subexpression '";
+ ErrorMsg += SubExpr;
+ }
+ ErrorMsg += "'";
+ if (ErrText != "") {
+ ErrorMsg += " ";
+ ErrorMsg += ErrText;
+ }
+ return EvalResult(std::move(ErrorMsg));
+ }
+
+ bool handleError(StringRef Expr, const EvalResult &R) const {
+ assert(R.hasError() && "Not an error result.");
+ ErrStream << "Error evaluating expression '" << Expr << "': "
+ << R.getErrorMsg() << "\n";
+ return false;
+ }
+
+ std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
+ if (Expr.empty())
+ return std::make_pair(BinOpToken::Invalid, "");
+
+ // Handle the two 2-character tokens.
+ if (Expr.startswith("<<"))
+ return std::make_pair(BinOpToken::ShiftLeft,
+ Expr.substr(2).ltrim());
+ if (Expr.startswith(">>"))
+ return std::make_pair(BinOpToken::ShiftRight,
+ Expr.substr(2).ltrim());
+
+ // Handle one-character tokens.
+ BinOpToken Op;
+ switch (Expr[0]) {
+ default: return std::make_pair(BinOpToken::Invalid, Expr);
+ case '+': Op = BinOpToken::Add; break;
+ case '-': Op = BinOpToken::Sub; break;
+ case '&': Op = BinOpToken::BitwiseAnd; break;
+ case '|': Op = BinOpToken::BitwiseOr; break;
+ }
+
+ return std::make_pair(Op, Expr.substr(1).ltrim());
+ }
+
+ EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
+ const EvalResult &RHSResult) const {
+ switch (Op) {
+ default: llvm_unreachable("Tried to evaluate unrecognized operation.");
+ case BinOpToken::Add:
+ return EvalResult(LHSResult.getValue() + RHSResult.getValue());
+ case BinOpToken::Sub:
+ return EvalResult(LHSResult.getValue() - RHSResult.getValue());
+ case BinOpToken::BitwiseAnd:
+ return EvalResult(LHSResult.getValue() & RHSResult.getValue());
+ case BinOpToken::BitwiseOr:
+ return EvalResult(LHSResult.getValue() | RHSResult.getValue());
+ case BinOpToken::ShiftLeft:
+ return EvalResult(LHSResult.getValue() << RHSResult.getValue());
+ case BinOpToken::ShiftRight:
+ return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
+ }
+ }
+
+ // Parse a symbol and return a (string, string) pair representing the symbol
+ // name and expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
+ size_t FirstNonSymbol =
+ Expr.find_first_not_of("0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ ":_");
+ return std::make_pair(Expr.substr(0, FirstNonSymbol),
+ Expr.substr(FirstNonSymbol).ltrim());
+ }
+
+  // Evaluate a call to decode_operand. Decode the instruction at the given
+  // symbol and get the value of the requested operand.
+  // Returns an error if the instruction cannot be decoded, or the requested
+  // operand is not an immediate.
+  // On success, returns a pair containing the value of the operand, plus
+ // the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(EvalResult(("Cannot decode unknown symbol '" +
+ Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ','"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OpIdxExpr;
+ std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (OpIdxExpr.hasError())
+ return std::make_pair(OpIdxExpr, "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ')'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size))
+ return std::make_pair(EvalResult(("Couldn't decode instruction at '" +
+ Symbol + "'").str()),
+ "");
+
+ unsigned OpIdx = OpIdxExpr.getValue();
+ if (OpIdx >= Inst.getNumOperands()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
+ << "' for instruction '" << Symbol
+ << "'. Instruction has only "
+ << format("%i", Inst.getNumOperands())
+ << " operands.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream,
+ Checker.Disassembler->getContext().getAsmInfo(),
+ Checker.InstPrinter);
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ const MCOperand &Op = Inst.getOperand(OpIdx);
+ if (!Op.isImm()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Operand '" << format("%i", OpIdx)
+ << "' of instruction '" << Symbol
+ << "' is not an immediate.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream,
+ Checker.Disassembler->getContext().getAsmInfo(),
+ Checker.InstPrinter);
+
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
+ }
+
+ // Evaluate a call to next_pc. Decode the instruction at the given
+  // symbol and return the program counter of the following instruction.
+  // Returns an error if the instruction cannot be decoded.
+  // On success, returns a pair containing the next PC, plus the expression
+  // remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(EvalResult(("Cannot decode unknown symbol '"
+ + Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ')'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size))
+ return std::make_pair(EvalResult(("Couldn't decode instruction at '" +
+ Symbol + "'").str()),
+ "");
+ uint64_t NextPC = Checker.getSymbolAddress(Symbol) + Size;
+
+ return std::make_pair(EvalResult(NextPC), RemainingExpr);
+ }
+
+  // Evaluate an identifier expr, which may be a symbol or a call to one of
+  // the builtin functions handled below: decode_operand or next_pc.
+  // Return the result, plus the expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr) const {
+ StringRef Symbol;
+ StringRef RemainingExpr;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);
+
+ // Check for builtin function calls.
+ if (Symbol == "decode_operand")
+ return evalDecodeOperand(RemainingExpr);
+ else if (Symbol == "next_pc")
+ return evalNextPC(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol)) {
+ std::string ErrMsg("No known address for symbol '");
+ ErrMsg += Symbol;
+ ErrMsg += "'";
+ if (Symbol.startswith("L"))
+        ErrMsg += " (this appears to be an assembler local label - "
+                  "perhaps drop the 'L'?)";
+
+ return std::make_pair(EvalResult(ErrMsg), "");
+ }
+
+ // Looks like a plain symbol reference.
+ return std::make_pair(EvalResult(Checker.getSymbolAddress(Symbol)),
+ RemainingExpr);
+ }
+
+ // Parse a number (hexadecimal or decimal) and return a (string, string)
+ // pair representing the number and the expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
+ size_t FirstNonDigit = StringRef::npos;
+ if (Expr.startswith("0x")) {
+ FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ } else {
+ FirstNonDigit = Expr.find_first_not_of("0123456789");
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ }
+ return std::make_pair(Expr.substr(0, FirstNonDigit),
+ Expr.substr(FirstNonDigit));
+ }
+
+  // Evaluate a constant numeric expression (hexadecimal or decimal) and
+ // return a pair containing the result, and the expression remaining to be
+ // evaluated.
+ std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
+ StringRef ValueStr;
+ StringRef RemainingExpr;
+ std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
+
+ if (ValueStr.empty() || !isdigit(ValueStr[0]))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected number"),
+ "");
+ uint64_t Value;
+ ValueStr.getAsInteger(0, Value);
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Evaluate an expression of the form "(<expr>)" and return a pair
+ // containing the result of evaluating <expr>, plus the expression
+ // remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr) const {
+ assert(Expr.startswith("(") && "Not a parenthesized expression");
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim()));
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, "");
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(unexpectedToken(RemainingExpr, Expr,
+ "expected ')'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate an expression in one of the following forms:
+ // *{<number>}<symbol>
+ // *{<number>}(<symbol> + <number>)
+ // *{<number>}(<symbol> - <number>)
+ // Return a pair containing the result, plus the expression remaining to be
+ // parsed.
+ std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
+ assert(Expr.startswith("*") && "Not a load expression");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ // Parse read size.
+ if (!RemainingExpr.startswith("{"))
+ return std::make_pair(EvalResult("Expected '{' following '*'."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ EvalResult ReadSizeExpr;
+ std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (ReadSizeExpr.hasError())
+ return std::make_pair(ReadSizeExpr, RemainingExpr);
+ uint64_t ReadSize = ReadSizeExpr.getValue();
+ if (ReadSize < 1 || ReadSize > 8)
+ return std::make_pair(EvalResult("Invalid size for dereference."), "");
+ if (!RemainingExpr.startswith("}"))
+ return std::make_pair(EvalResult("Missing '}' for dereference."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ // Check for '(symbol +/- constant)' form.
+ bool SymbolPlusConstant = false;
+ if (RemainingExpr.startswith("(")) {
+ SymbolPlusConstant = true;
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ }
+
+ // Read symbol.
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(EvalResult(("Cannot dereference unknown symbol '"
+ + Symbol + "'").str()),
+ "");
+
+    // Set up the default offset.
+ int64_t Offset = 0;
+
+ // Handle "+/- constant)" portion if necessary.
+ if (SymbolPlusConstant) {
+ char OpChar = RemainingExpr[0];
+ if (OpChar != '+' && OpChar != '-')
+ return std::make_pair(EvalResult("Invalid operator in load address."),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OffsetExpr;
+ std::tie(OffsetExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ Offset = (OpChar == '+') ?
+ OffsetExpr.getValue() : -1 * OffsetExpr.getValue();
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(EvalResult("Missing ')' in load address."),
+ "");
+
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ }
+
+ return std::make_pair(
+ EvalResult(Checker.readMemoryAtSymbol(Symbol, Offset, ReadSize)),
+ RemainingExpr);
+ }
+
+ // Evaluate a "simple" expression. This is any expression that _isn't_ an
+ // un-parenthesized binary expression.
+ //
+ // "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
+ //
+ // Returns a pair containing the result of the evaluation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+
+ if (Expr.empty())
+ return std::make_pair(EvalResult("Unexpected end of expression"), "");
+
+ if (Expr[0] == '(')
+ std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr);
+ else if (Expr[0] == '*')
+ std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
+ else if (isalpha(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
+
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, RemainingExpr);
+
+ // Evaluate bit-slice if present.
+ if (RemainingExpr.startswith("["))
+ std::tie(SubExprResult, RemainingExpr) =
+ evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));
+
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate a bit-slice of an expression.
+ // A bit-slice has the form "<expr>[high:low]". The result of evaluating a
+ // slice is the bits between high and low (inclusive) in the original
+ // expression, right shifted so that the "low" bit is in position 0 in the
+ // result.
+ // Returns a pair containing the result of the slice operation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSliceExpr(
+ std::pair<EvalResult, StringRef> Ctx) const{
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) = Ctx;
+
+ assert(RemainingExpr.startswith("[") && "Not a slice expr.");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult HighBitExpr;
+ std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (HighBitExpr.hasError())
+ return std::make_pair(HighBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith(":"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ':'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult LowBitExpr;
+ std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (LowBitExpr.hasError())
+ return std::make_pair(LowBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith("]"))
+ return std::make_pair(unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected ']'"),
+ "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ unsigned HighBit = HighBitExpr.getValue();
+ unsigned LowBit = LowBitExpr.getValue();
+ uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
+ uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
+ return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
+ }
+
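The mask-and-shift in evalSliceExpr above is easy to get wrong by one bit; a minimal standalone check of the same computation (illustration only, not part of the patch, assuming high >= low and a slice narrower than 64 bits):

    #include <cassert>
    #include <cstdint>

    // Extract bits [High:Low] of Value, shifted so that bit Low lands at bit 0,
    // mirroring the Mask/SlicedValue computation in evalSliceExpr.
    static uint64_t sliceBits(uint64_t Value, unsigned High, unsigned Low) {
      uint64_t Mask = ((uint64_t)1 << (High - Low + 1)) - 1;
      return (Value >> Low) & Mask;
    }

    int main() {
      assert(sliceBits(0xDEADBEEFULL, 15, 8) == 0xBE); // the 0xBE byte
      assert(sliceBits(0x80000000ULL, 31, 31) == 1);   // a single-bit slice
      return 0;
    }
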
+ // Evaluate a "complex" expression.
+ // Takes an already evaluated subexpression and checks for the presence of a
+ // binary operator, computing the result of the binary operation if one is
+ // found. Used to make arithmetic expressions left-associative.
+ // Returns a pair containing the ultimate result of evaluating the
+ // expression, plus the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalComplexExpr(
+ std::pair<EvalResult, StringRef> Ctx) const {
+ EvalResult LHSResult;
+ StringRef RemainingExpr;
+ std::tie(LHSResult, RemainingExpr) = Ctx;
+
+ // If there was an error, or there's nothing left to evaluate, return the
+ // result.
+ if (LHSResult.hasError() || RemainingExpr == "")
+ return std::make_pair(LHSResult, RemainingExpr);
+
+    // Otherwise, check if this is a binary expression.
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+
+ // If this isn't a recognized expression just return.
+ if (BinOp == BinOpToken::Invalid)
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr);
+
+ // If there was an error evaluating the RHS, return it.
+ if (RHSResult.hasError())
+ return std::make_pair(RHSResult, RemainingExpr);
+
+ // This is a binary expression - evaluate and try to continue as a
+ // complex expr.
+ EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));
+
+ return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr));
+ }
+
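evalComplexExpr above is what makes chained operators left-associative: the value computed so far becomes the LHS, and exactly one operator plus one simple RHS is folded in per step. An iterative standalone rendering of that fold (illustration only, restricted to '+' and '-'):

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Fold "V0 op1 V1 op2 V2 ..." left to right, the way evalComplexExpr keeps
    // feeding its running result back in as the next LHS.
    static int64_t foldLeft(int64_t LHS,
                            const std::vector<std::pair<char, int64_t>> &Rest) {
      for (const auto &OpAndRHS : Rest) {
        switch (OpAndRHS.first) {
        case '+': LHS += OpAndRHS.second; break;
        case '-': LHS -= OpAndRHS.second; break;
        }
      }
      return LHS;
    }

    int main() {
      // "10 - 3 - 2" must evaluate as (10 - 3) - 2 = 5, not 10 - (3 - 2) = 9.
      assert(foldLeft(10, {{'-', 3}, {'-', 2}}) == 5);
      return 0;
    }
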
+ bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size) const {
+ MCDisassembler *Dis = Checker.Disassembler;
+ StringRef SectionMem = Checker.getSubsectionStartingAt(Symbol);
+ StringRefMemoryObject SectionBytes(SectionMem, 0);
+
+ MCDisassembler::DecodeStatus S =
+ Dis->getInstruction(Inst, Size, SectionBytes, 0, nulls(), nulls());
+
+ return (S == MCDisassembler::Success);
+ }
+
+ };
+
+}
+
+bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
+ CheckExpr = CheckExpr.trim();
+ DEBUG(llvm::dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+ << "'...\n");
+ RuntimeDyldCheckerExprEval P(*this, ErrStream);
+ bool Result = P.evaluate(CheckExpr);
+ (void)Result;
+ DEBUG(llvm::dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+ << (Result ? "passed" : "FAILED") << ".\n");
+ return Result;
+}
+
+bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
+ MemoryBuffer* MemBuf) const {
+ bool DidAllTestsPass = true;
+ unsigned NumRules = 0;
+
+ const char *LineStart = MemBuf->getBufferStart();
+
+ // Eat whitespace.
+ while (LineStart != MemBuf->getBufferEnd() &&
+ std::isspace(*LineStart))
+ ++LineStart;
+
+ while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
+ const char *LineEnd = LineStart;
+ while (LineEnd != MemBuf->getBufferEnd() &&
+ *LineEnd != '\r' && *LineEnd != '\n')
+ ++LineEnd;
+
+ StringRef Line(LineStart, LineEnd - LineStart);
+ if (Line.startswith(RulePrefix)) {
+ DidAllTestsPass &= check(Line.substr(RulePrefix.size()));
+ ++NumRules;
+ }
+
+ // Eat whitespace.
+ LineStart = LineEnd;
+ while (LineStart != MemBuf->getBufferEnd() &&
+ std::isspace(*LineStart))
+ ++LineStart;
+ }
+ return DidAllTestsPass && (NumRules != 0);
+}
+
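checkAllRulesInBuffer above is a line scanner driven by a caller-supplied prefix; the plain-C++ sketch below reproduces that loop without LLVM types so the control flow can be tested in isolation. The "# rtdyld-check:" prefix and the rule text are only illustrative choices, not something the function hard-codes.

    #include <cassert>
    #include <cctype>
    #include <string>
    #include <vector>

    // Collect every line of Buf that starts with Prefix (prefix stripped),
    // mirroring the whitespace-skipping scan in checkAllRulesInBuffer.
    static std::vector<std::string> collectRules(const std::string &Buf,
                                                 const std::string &Prefix) {
      std::vector<std::string> Rules;
      size_t I = 0;
      while (I < Buf.size()) {
        while (I < Buf.size() && std::isspace((unsigned char)Buf[I]))
          ++I;
        size_t End = I;
        while (End < Buf.size() && Buf[End] != '\r' && Buf[End] != '\n')
          ++End;
        std::string Line = Buf.substr(I, End - I);
        if (Line.compare(0, Prefix.size(), Prefix) == 0)
          Rules.push_back(Line.substr(Prefix.size()));
        I = End;
      }
      return Rules;
    }

    int main() {
      std::string Buf = "# a comment\n"
                        "# rtdyld-check: foo = 1\n"
                        "# rtdyld-check: bar = 2\n";
      std::vector<std::string> Rules = collectRules(Buf, "# rtdyld-check:");
      assert(Rules.size() == 2 && Rules[0] == " foo = 1");
      return 0;
    }
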
+bool RuntimeDyldChecker::isSymbolValid(StringRef Symbol) const {
+ return RTDyld.getSymbolAddress(Symbol) != nullptr;
+}
+
+uint64_t RuntimeDyldChecker::getSymbolAddress(StringRef Symbol) const {
+ return RTDyld.getAnySymbolRemoteAddress(Symbol);
+}
+
+uint64_t RuntimeDyldChecker::readMemoryAtSymbol(StringRef Symbol,
+ int64_t Offset,
+ unsigned Size) const {
+ uint8_t *Src = RTDyld.getSymbolAddress(Symbol);
+ uint64_t Result = 0;
+ memcpy(&Result, Src + Offset, Size);
+ return Result;
+}
+
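readMemoryAtSymbol above copies the requested bytes into the low-address end of a zeroed uint64_t, so the value it reports is in host byte order. A standalone sketch of that read, with a made-up byte buffer standing in for the loaded section:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Read Size bytes at Src + Offset into a zero-initialized uint64_t, exactly
    // as readMemoryAtSymbol does; the result is interpreted in host endianness.
    static uint64_t readBytes(const uint8_t *Src, int64_t Offset, unsigned Size) {
      uint64_t Result = 0;
      std::memcpy(&Result, Src + Offset, Size);
      return Result;
    }

    int main() {
      const uint8_t Section[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
      uint64_t V = readBytes(Section, 2, 4); // bytes 33 44 55 66
      // On a little-endian host this is 0x66554433; on a big-endian host the
      // bytes land in the most significant half: 0x3344556600000000.
      assert(V == 0x66554433ULL || V == 0x3344556600000000ULL);
      return 0;
    }
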
+StringRef RuntimeDyldChecker::getSubsectionStartingAt(StringRef Name) const {
+ RuntimeDyldImpl::SymbolTableMap::const_iterator pos =
+ RTDyld.GlobalSymbolTable.find(Name);
+ if (pos == RTDyld.GlobalSymbolTable.end())
+ return StringRef();
+ RuntimeDyldImpl::SymbolLoc Loc = pos->second;
+ uint8_t *SectionAddr = RTDyld.getSectionAddress(Loc.first);
+ return StringRef(reinterpret_cast<const char*>(SectionAddr) + Loc.second,
+ RTDyld.Sections[Loc.first].Size - Loc.second);
+}
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index f2c69fc..728138e 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -11,12 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dyld"
#include "RuntimeDyldELF.h"
#include "JITRegistrar.h"
#include "ObjectImageCommon.h"
#include "llvm/ADT/IntervalMap.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
@@ -25,122 +23,125 @@
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/MemoryBuffer.h"
+
using namespace llvm;
using namespace llvm::object;
+#define DEBUG_TYPE "dyld"
+
namespace {
-static inline
-error_code check(error_code Err) {
+static inline std::error_code check(std::error_code Err) {
if (Err) {
report_fatal_error(Err.message());
}
return Err;
}
-template<class ELFT>
-class DyldELFObject
- : public ELFObjectFile<ELFT> {
+template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
typedef Elf_Sym_Impl<ELFT> Elf_Sym;
- typedef
- Elf_Rel_Impl<ELFT, false> Elf_Rel;
- typedef
- Elf_Rel_Impl<ELFT, true> Elf_Rela;
+ typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
+ typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
- typedef typename ELFDataTypeTypedefHelper<
- ELFT>::value_type addr_type;
+ typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;
+
+ std::unique_ptr<ObjectFile> UnderlyingFile;
public:
- DyldELFObject(MemoryBuffer *Wrapper, error_code &ec);
+ DyldELFObject(std::unique_ptr<ObjectFile> UnderlyingFile,
+ std::unique_ptr<MemoryBuffer> Wrapper, std::error_code &ec);
+
+ DyldELFObject(std::unique_ptr<MemoryBuffer> Wrapper, std::error_code &ec);
void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr);
// Methods for type inquiry through isa, cast and dyn_cast
static inline bool classof(const Binary *v) {
- return (isa<ELFObjectFile<ELFT> >(v)
- && classof(cast<ELFObjectFile
- <ELFT> >(v)));
+ return (isa<ELFObjectFile<ELFT>>(v) &&
+ classof(cast<ELFObjectFile<ELFT>>(v)));
}
- static inline bool classof(
- const ELFObjectFile<ELFT> *v) {
+ static inline bool classof(const ELFObjectFile<ELFT> *v) {
return v->isDyldType();
}
};
-template<class ELFT>
-class ELFObjectImage : public ObjectImageCommon {
- protected:
- DyldELFObject<ELFT> *DyldObj;
- bool Registered;
-
- public:
- ELFObjectImage(ObjectBuffer *Input,
- DyldELFObject<ELFT> *Obj)
- : ObjectImageCommon(Input, Obj),
- DyldObj(Obj),
- Registered(false) {}
-
- virtual ~ELFObjectImage() {
- if (Registered)
- deregisterWithDebugger();
- }
+template <class ELFT> class ELFObjectImage : public ObjectImageCommon {
+ bool Registered;
- // Subclasses can override these methods to update the image with loaded
- // addresses for sections and common symbols
- virtual void updateSectionAddress(const SectionRef &Sec, uint64_t Addr)
- {
- DyldObj->updateSectionAddress(Sec, Addr);
- }
+public:
+ ELFObjectImage(ObjectBuffer *Input, std::unique_ptr<DyldELFObject<ELFT>> Obj)
+ : ObjectImageCommon(Input, std::move(Obj)), Registered(false) {}
- virtual void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr)
- {
- DyldObj->updateSymbolAddress(Sym, Addr);
- }
+ virtual ~ELFObjectImage() {
+ if (Registered)
+ deregisterWithDebugger();
+ }
- virtual void registerWithDebugger()
- {
- JITRegistrar::getGDBRegistrar().registerObject(*Buffer);
- Registered = true;
- }
- virtual void deregisterWithDebugger()
- {
- JITRegistrar::getGDBRegistrar().deregisterObject(*Buffer);
- }
+ // Subclasses can override these methods to update the image with loaded
+ // addresses for sections and common symbols
+ void updateSectionAddress(const SectionRef &Sec, uint64_t Addr) override {
+ static_cast<DyldELFObject<ELFT>*>(getObjectFile())
+ ->updateSectionAddress(Sec, Addr);
+ }
+
+ void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr) override {
+ static_cast<DyldELFObject<ELFT>*>(getObjectFile())
+ ->updateSymbolAddress(Sym, Addr);
+ }
+
+ void registerWithDebugger() override {
+ JITRegistrar::getGDBRegistrar().registerObject(*Buffer);
+ Registered = true;
+ }
+ void deregisterWithDebugger() override {
+ JITRegistrar::getGDBRegistrar().deregisterObject(*Buffer);
+ }
};
// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
-template<class ELFT>
-DyldELFObject<ELFT>::DyldELFObject(MemoryBuffer *Wrapper, error_code &ec)
- : ELFObjectFile<ELFT>(Wrapper, ec) {
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(std::unique_ptr<MemoryBuffer> Wrapper,
+ std::error_code &EC)
+ : ELFObjectFile<ELFT>(std::move(Wrapper), EC) {
+ this->isDyldELFObject = true;
+}
+
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(std::unique_ptr<ObjectFile> UnderlyingFile,
+ std::unique_ptr<MemoryBuffer> Wrapper,
+ std::error_code &EC)
+ : ELFObjectFile<ELFT>(std::move(Wrapper), EC),
+ UnderlyingFile(std::move(UnderlyingFile)) {
this->isDyldELFObject = true;
}
-template<class ELFT>
+template <class ELFT>
void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
uint64_t Addr) {
DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
- Elf_Shdr *shdr = const_cast<Elf_Shdr*>(
- reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+ Elf_Shdr *shdr =
+ const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
shdr->sh_addr = static_cast<addr_type>(Addr);
}
-template<class ELFT>
+template <class ELFT>
void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
uint64_t Addr) {
- Elf_Sym *sym = const_cast<Elf_Sym*>(
- ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
+ Elf_Sym *sym = const_cast<Elf_Sym *>(
+ ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
@@ -178,60 +179,94 @@ void RuntimeDyldELF::deregisterEHFrames() {
RegisteredEHFrameSections.clear();
}
+ObjectImage *
+RuntimeDyldELF::createObjectImageFromFile(std::unique_ptr<object::ObjectFile> ObjFile) {
+ if (!ObjFile)
+ return nullptr;
+
+ std::error_code ec;
+ std::unique_ptr<MemoryBuffer> Buffer(
+ MemoryBuffer::getMemBuffer(ObjFile->getData(), "", false));
+
+ if (ObjFile->getBytesInAddress() == 4 && ObjFile->isLittleEndian()) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 2, false>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::little, 2, false>>(
+ nullptr, std::move(Obj));
+ } else if (ObjFile->getBytesInAddress() == 4 && !ObjFile->isLittleEndian()) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::big, 2, false>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::big, 2, false>>(nullptr, std::move(Obj));
+ } else if (ObjFile->getBytesInAddress() == 8 && !ObjFile->isLittleEndian()) {
+ auto Obj = llvm::make_unique<DyldELFObject<ELFType<support::big, 2, true>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::big, 2, true>>(nullptr,
+ std::move(Obj));
+ } else if (ObjFile->getBytesInAddress() == 8 && ObjFile->isLittleEndian()) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 2, true>>>(
+ std::move(ObjFile), std::move(Buffer), ec);
+ return new ELFObjectImage<ELFType<support::little, 2, true>>(
+ nullptr, std::move(Obj));
+ } else
+ llvm_unreachable("Unexpected ELF format");
+}
+
ObjectImage *RuntimeDyldELF::createObjectImage(ObjectBuffer *Buffer) {
if (Buffer->getBufferSize() < ELF::EI_NIDENT)
llvm_unreachable("Unexpected ELF object size");
- std::pair<unsigned char, unsigned char> Ident = std::make_pair(
- (uint8_t)Buffer->getBufferStart()[ELF::EI_CLASS],
- (uint8_t)Buffer->getBufferStart()[ELF::EI_DATA]);
- error_code ec;
+ std::pair<unsigned char, unsigned char> Ident =
+ std::make_pair((uint8_t)Buffer->getBufferStart()[ELF::EI_CLASS],
+ (uint8_t)Buffer->getBufferStart()[ELF::EI_DATA]);
+ std::error_code ec;
+
+ std::unique_ptr<MemoryBuffer> Buf(Buffer->getMemBuffer());
if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB) {
- DyldELFObject<ELFType<support::little, 4, false> > *Obj =
- new DyldELFObject<ELFType<support::little, 4, false> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::little, 4, false> >(Buffer, Obj);
- }
- else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB) {
- DyldELFObject<ELFType<support::big, 4, false> > *Obj =
- new DyldELFObject<ELFType<support::big, 4, false> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::big, 4, false> >(Buffer, Obj);
- }
- else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB) {
- DyldELFObject<ELFType<support::big, 8, true> > *Obj =
- new DyldELFObject<ELFType<support::big, 8, true> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::big, 8, true> >(Buffer, Obj);
- }
- else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) {
- DyldELFObject<ELFType<support::little, 8, true> > *Obj =
- new DyldELFObject<ELFType<support::little, 8, true> >(
- Buffer->getMemBuffer(), ec);
- return new ELFObjectImage<ELFType<support::little, 8, true> >(Buffer, Obj);
- }
- else
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 4, false>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::little, 4, false>>(
+ Buffer, std::move(Obj));
+ } else if (Ident.first == ELF::ELFCLASS32 &&
+ Ident.second == ELF::ELFDATA2MSB) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::big, 4, false>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::big, 4, false>>(Buffer,
+ std::move(Obj));
+ } else if (Ident.first == ELF::ELFCLASS64 &&
+ Ident.second == ELF::ELFDATA2MSB) {
+ auto Obj = llvm::make_unique<DyldELFObject<ELFType<support::big, 8, true>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::big, 8, true>>(Buffer, std::move(Obj));
+ } else if (Ident.first == ELF::ELFCLASS64 &&
+ Ident.second == ELF::ELFDATA2LSB) {
+ auto Obj =
+ llvm::make_unique<DyldELFObject<ELFType<support::little, 8, true>>>(
+ std::move(Buf), ec);
+ return new ELFObjectImage<ELFType<support::little, 8, true>>(Buffer, std::move(Obj));
+ } else
llvm_unreachable("Unexpected ELF format");
}
-RuntimeDyldELF::~RuntimeDyldELF() {
-}
+RuntimeDyldELF::~RuntimeDyldELF() {}
void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
uint64_t SymOffset) {
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
- break;
+ break;
case ELF::R_X86_64_64: {
- uint64_t *Target = reinterpret_cast<uint64_t*>(Section.Address + Offset);
+ uint64_t *Target = reinterpret_cast<uint64_t *>(Section.Address + Offset);
*Target = Value + Addend;
- DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend))
- << " at " << format("%p\n",Target));
+ DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+ << format("%p\n", Target));
break;
}
case ELF::R_X86_64_32:
@@ -239,20 +274,20 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
Value += Addend;
assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
(Type == ELF::R_X86_64_32S &&
- ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
+ ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
*Target = TruncatedAddr;
- DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr)
- << " at " << format("%p\n",Target));
+ DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+ << format("%p\n", Target));
break;
}
case ELF::R_X86_64_GOTPCREL: {
// findGOTEntry returns the 'G + GOT' part of the relocation calculation
// based on the load/target address of the GOT (not the current/local addr).
uint64_t GOTAddr = findGOTEntry(Value, SymOffset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
- uint64_t FinalAddress = Section.LoadAddress + Offset;
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
// The processRelocationRef method combines the symbol offset and the addend
// and in most cases that's what we want. For this relocation type, we need
// the raw addend, so we subtract the symbol offset to get it.
@@ -265,10 +300,10 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
case ELF::R_X86_64_PC32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
- + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
- uint64_t FinalAddress = Section.LoadAddress + Offset;
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
int64_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
assert(RealOffset <= INT32_MAX && RealOffset >= INT32_MIN);
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
@@ -278,10 +313,10 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
case ELF::R_X86_64_PC64: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint64_t *Placeholder = reinterpret_cast<uint64_t*>(Section.ObjAddress
- + Offset);
- uint64_t *Target = reinterpret_cast<uint64_t*>(Section.Address + Offset);
- uint64_t FinalAddress = Section.LoadAddress + Offset;
+ uint64_t *Placeholder =
+ reinterpret_cast<uint64_t *>(Section.ObjAddress + Offset);
+ uint64_t *Target = reinterpret_cast<uint64_t *>(Section.Address + Offset);
+ uint64_t FinalAddress = Section.LoadAddress + Offset;
*Target = *Placeholder + Value + Addend - FinalAddress;
break;
}
@@ -289,53 +324,48 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
}
void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend) {
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
switch (Type) {
case ELF::R_386_32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
- + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
*Target = *Placeholder + Value + Addend;
break;
}
case ELF::R_386_PC32: {
// Get the placeholder value from the generated object since
// a previous relocation attempt may have overwritten the loaded version
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress
- + Offset);
- uint32_t *Target = reinterpret_cast<uint32_t*>(Section.Address + Offset);
- uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *Target = reinterpret_cast<uint32_t *>(Section.Address + Offset);
+ uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
uint32_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
*Target = RealOffset;
break;
- }
- default:
- // There are other relocation types, but it appears these are the
- // only ones currently used by the LLVM ELF object writer
- llvm_unreachable("Relocation type not implemented yet!");
- break;
+ }
+ default:
+ // There are other relocation types, but it appears these are the
+ // only ones currently used by the LLVM ELF object writer
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
}
}
void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend) {
- uint32_t *TargetPtr = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
uint64_t FinalAddress = Section.LoadAddress + Offset;
DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
<< format("%llx", Section.Address + Offset)
- << " FinalAddress: 0x" << format("%llx",FinalAddress)
- << " Value: 0x" << format("%llx",Value)
- << " Type: 0x" << format("%x",Type)
- << " Addend: 0x" << format("%llx",Addend)
+ << " FinalAddress: 0x" << format("%llx", FinalAddress)
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
<< "\n");
switch (Type) {
@@ -343,7 +373,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_AARCH64_ABS64: {
- uint64_t *TargetPtr = reinterpret_cast<uint64_t*>(Section.Address + Offset);
+ uint64_t *TargetPtr =
+ reinterpret_cast<uint64_t *>(Section.Address + Offset);
*TargetPtr = Value + Addend;
break;
}
@@ -419,35 +450,77 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
break;
}
+ case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+ // Operation: Page(S+A) - Page(P)
+ uint64_t Result =
+ ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+ // Check that -2^32 <= X < 2^32
+ assert(static_cast<int64_t>(Result) >= (-1LL << 32) &&
+ static_cast<int64_t>(Result) < (1LL << 32) &&
+ "overflow check failed for relocation");
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0x9f00001fU;
+ // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
+ // from bits 32:12 of X.
+ *TargetPtr |= ((Result & 0x3000U) << (29 - 12));
+ *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
+ break;
+ }
+ case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
+ // Operation: S + A
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffc003ffU;
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:2 of X
+ *TargetPtr |= ((Result & 0xffc) << (10 - 2));
+ break;
+ }
+ case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
+ // Operation: S + A
+ uint64_t Result = Value + Addend;
+
+ // AArch64 code is emitted with .rela relocations. The data already in any
+ // bits affected by the relocation on entry is garbage.
+ *TargetPtr &= 0xffc003ffU;
+ // Immediate goes in bits 21:10 of LD/ST instruction, taken
+ // from bits 11:3 of X
+ *TargetPtr |= ((Result & 0xff8) << (10 - 3));
+ break;
+ }
}
}
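The R_AARCH64_ADR_PREL_PG_HI21 case in the switch above computes a page delta and scatters it across the ADRP immediate fields. A standalone check of that arithmetic, reusing the exact masks and shifts from the case; the sample addresses and the x0 destination register in the encoding are made up:

    #include <cassert>
    #include <cstdint>

    // Page(X) is X with the low 12 bits cleared; the relocation encodes
    // Page(S + A) - Page(P) and requires it to fit in a signed 33-bit range.
    static uint64_t pageDelta(uint64_t Target, uint64_t Place) {
      return (Target & ~0xfffULL) - (Place & ~0xfffULL);
    }

    int main() {
      uint64_t Delta = pageDelta(/*S+A=*/0x412345, /*P=*/0x401000);
      assert(Delta == 0x11000); // 0x412000 - 0x401000
      assert(static_cast<int64_t>(Delta) >= (-1LL << 32) &&
             static_cast<int64_t>(Delta) < (1LL << 32));

      // Pack the delta the same way the case above does: immlo (delta bits
      // 13:12) goes to instruction bits 30:29, immhi (delta bits 32:14) to
      // bits 23:5.
      uint32_t Insn = 0x90000000; // "adrp x0, #0", immediate still zero
      Insn &= 0x9f00001fU;
      Insn |= (uint32_t)((Delta & 0x3000U) << (29 - 12));
      Insn |= (uint32_t)((Delta & 0x1ffffc000ULL) >> (14 - 5));
      assert(((Insn >> 29) & 0x3) == ((Delta >> 12) & 0x3));        // immlo
      assert(((Insn >> 5) & 0x7ffff) == ((Delta >> 14) & 0x7ffff)); // immhi
      return 0;
    }
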
void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend) {
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
// TODO: Add Thumb relocations.
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress +
- Offset);
- uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
Value += Addend;
DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
<< Section.Address + Offset
- << " FinalAddress: " << format("%p",FinalAddress)
- << " Value: " << format("%x",Value)
- << " Type: " << format("%x",Type)
- << " Addend: " << format("%x",Addend)
- << "\n");
+ << " FinalAddress: " << format("%p", FinalAddress) << " Value: "
+ << format("%x", Value) << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
- switch(Type) {
+ switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
+ case ELF::R_ARM_NONE:
+ break;
// Write a 32bit value to relocation address, taking into account the
// implicit addend encoded in the target.
+ case ELF::R_ARM_PREL31:
case ELF::R_ARM_TARGET1:
case ELF::R_ARM_ABS32:
*TargetPtr = *Placeholder + Value;
@@ -475,8 +548,8 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
*TargetPtr |= ((Value >> 12) & 0xF) << 16;
break;
// Write 24 bit relative value to the branch instruction.
- case ELF::R_ARM_PC24 : // Fall through.
- case ELF::R_ARM_CALL : // Fall through.
+ case ELF::R_ARM_PC24: // Fall through.
+ case ELF::R_ARM_CALL: // Fall through.
case ELF::R_ARM_JUMP24: {
int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
RelValue = (RelValue & 0x03FFFFFC) >> 2;
@@ -496,25 +569,20 @@ void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
}
void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend) {
- uint32_t *Placeholder = reinterpret_cast<uint32_t*>(Section.ObjAddress +
- Offset);
- uint32_t* TargetPtr = (uint32_t*)(Section.Address + Offset);
+ uint64_t Offset, uint32_t Value,
+ uint32_t Type, int32_t Addend) {
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t *>(Section.ObjAddress + Offset);
+ uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
Value += Addend;
DEBUG(dbgs() << "resolveMipselocation, LocalAddress: "
- << Section.Address + Offset
- << " FinalAddress: "
- << format("%p",Section.LoadAddress + Offset)
- << " Value: " << format("%x",Value)
- << " Type: " << format("%x",Type)
- << " Addend: " << format("%x",Addend)
- << "\n");
+ << Section.Address + Offset << " FinalAddress: "
+ << format("%p", Section.LoadAddress + Offset) << " Value: "
+ << format("%x", Value) << " Type: " << format("%x", Type)
+ << " Addend: " << format("%x", Addend) << "\n");
- switch(Type) {
+ switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
break;
@@ -522,13 +590,13 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
*TargetPtr = Value + (*Placeholder);
break;
case ELF::R_MIPS_26:
- *TargetPtr = ((*Placeholder) & 0xfc000000) | (( Value & 0x0fffffff) >> 2);
+ *TargetPtr = ((*Placeholder) & 0xfc000000) | ((Value & 0x0fffffff) >> 2);
break;
case ELF::R_MIPS_HI16:
// Get the higher 16-bits. Also add 1 if bit 15 is 1.
Value += ((*Placeholder) & 0x0000ffff) << 16;
- *TargetPtr = ((*Placeholder) & 0xffff0000) |
- (((Value + 0x8000) >> 16) & 0xffff);
+ *TargetPtr =
+ ((*Placeholder) & 0xffff0000) | (((Value + 0x8000) >> 16) & 0xffff);
break;
case ELF::R_MIPS_LO16:
Value += ((*Placeholder) & 0x0000ffff);
@@ -539,41 +607,47 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
// are used for internal JIT purpose. These relocations are similar to
// R_MIPS_HI16 and R_MIPS_LO16, but they do not take any addend into
// account.
- *TargetPtr = ((*TargetPtr) & 0xffff0000) |
- (((Value + 0x8000) >> 16) & 0xffff);
+ *TargetPtr =
+ ((*TargetPtr) & 0xffff0000) | (((Value + 0x8000) >> 16) & 0xffff);
break;
case ELF::R_MIPS_UNUSED2:
*TargetPtr = ((*TargetPtr) & 0xffff0000) | (Value & 0xffff);
break;
- }
+ }
}
-// Return the .TOC. section address to R_PPC64_TOC relocations.
-uint64_t RuntimeDyldELF::findPPC64TOC() const {
+// Return the .TOC. section and offset.
+void RuntimeDyldELF::findPPC64TOCSection(ObjectImage &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel) {
+ // Set a default SectionID in case we do not find a TOC section below.
+  // This may happen for references to the TOC base (sym@toc, .opd
+  // relocation) without a .toc directive. In this case just use the
+  // first section (which is usually the .opd) since the code won't
+  // reference the .toc base directly.
+ Rel.SymbolName = NULL;
+ Rel.SectionID = 0;
+
// The TOC consists of sections .got, .toc, .tocbss, .plt in that
// order. The TOC starts where the first of these sections starts.
- SectionList::const_iterator it = Sections.begin();
- SectionList::const_iterator ite = Sections.end();
- for (; it != ite; ++it) {
- if (it->Name == ".got" ||
- it->Name == ".toc" ||
- it->Name == ".tocbss" ||
- it->Name == ".plt")
+ for (section_iterator si = Obj.begin_sections(), se = Obj.end_sections();
+ si != se; ++si) {
+
+ StringRef SectionName;
+ check(si->getName(SectionName));
+
+ if (SectionName == ".got"
+ || SectionName == ".toc"
+ || SectionName == ".tocbss"
+ || SectionName == ".plt") {
+ Rel.SectionID = findOrEmitSection(Obj, *si, false, LocalSections);
break;
+ }
}
- if (it == ite) {
- // This may happen for
- // * references to TOC base base (sym@toc, .odp relocation) without
- // a .toc directive.
- // In this case just use the first section (which is usually
- // the .odp) since the code won't reference the .toc base
- // directly.
- it = Sections.begin();
- }
- assert (it != ite);
+
// Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
// thus permitting a full 64 Kbytes segment.
- return it->LoadAddress + 0x8000;
+ Rel.Addend = 0x8000;
}
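The 0x8000 addend produced above is what turns signed 16-bit TOC displacements into a full 64KB window. A standalone check of that bias (the TOC start address is made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      // The TOC base sits 0x8000 bytes past the start of the TOC area, so a
      // signed 16-bit displacement reaches all of [TOCStart, TOCStart + 0xFFFF].
      uint64_t TOCStart = 0x10020000; // illustrative load address
      uint64_t TOCBase = TOCStart + 0x8000;
      assert(TOCBase + INT16_MIN == TOCStart);
      assert(TOCBase + INT16_MAX == TOCStart + 0xFFFF);
      return 0;
    }
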
// Returns the sections and offset associated with the ODP entry referenced
@@ -583,10 +657,8 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
RelocationValueRef &Rel) {
// Get the ELF symbol value (st_value) to compare with Relocation offset in
// .opd entries
-
- error_code err;
- for (section_iterator si = Obj.begin_sections(),
- se = Obj.end_sections(); si != se; si.increment(err)) {
+ for (section_iterator si = Obj.begin_sections(), se = Obj.end_sections();
+ si != se; ++si) {
section_iterator RelSecI = si->getRelocatedSection();
if (RelSecI == Obj.end_sections())
continue;
@@ -596,16 +668,15 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
if (RelSectionName != ".opd")
continue;
- for (relocation_iterator i = si->begin_relocations(),
- e = si->end_relocations(); i != e;) {
- check(err);
-
+ for (relocation_iterator i = si->relocation_begin(),
+ e = si->relocation_end();
+ i != e;) {
// The R_PPC64_ADDR64 relocation indicates the first field
// of a .opd entry
uint64_t TypeFunc;
check(i->getType(TypeFunc));
if (TypeFunc != ELF::R_PPC64_ADDR64) {
- i.increment(err);
+ ++i;
continue;
}
@@ -615,10 +686,9 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
int64_t Addend;
check(getELFRelocationAddend(*i, Addend));
- i = i.increment(err);
+ ++i;
if (i == e)
break;
- check(err);
// Just check if following relocation is a R_PPC64_TOC
uint64_t TypeTOC;
@@ -634,7 +704,9 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
section_iterator tsi(Obj.end_sections());
check(TargetSymbol->getSection(tsi));
- Rel.SectionID = findOrEmitSection(Obj, (*tsi), true, LocalSections);
+ bool IsCode = false;
+ tsi->isText(IsCode);
+ Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections);
Rel.Addend = (intptr_t)Addend;
return;
}
@@ -642,69 +714,103 @@ void RuntimeDyldELF::findOPDEntrySection(ObjectImage &Obj,
llvm_unreachable("Attempting to get address of ODP entry!");
}
-// Relocation masks following the #lo(value), #hi(value), #higher(value),
-// and #highest(value) macros defined in section 4.5.1. Relocation Types
-// in PPC-elf64abi document.
-//
-static inline
-uint16_t applyPPClo (uint64_t value)
-{
- return value & 0xffff;
-}
+// Relocation masks following the #lo(value), #hi(value), #ha(value),
+// #higher(value), #highera(value), #highest(value), and #highesta(value)
+// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
+// document.
-static inline
-uint16_t applyPPChi (uint64_t value)
-{
+static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
+
+static inline uint16_t applyPPChi(uint64_t value) {
return (value >> 16) & 0xffff;
}
-static inline
-uint16_t applyPPChigher (uint64_t value)
-{
+static inline uint16_t applyPPCha(uint64_t value) {
+ return ((value + 0x8000) >> 16) & 0xffff;
+}
+
+static inline uint16_t applyPPChigher(uint64_t value) {
return (value >> 32) & 0xffff;
}
-static inline
-uint16_t applyPPChighest (uint64_t value)
-{
+static inline uint16_t applyPPChighera(uint64_t value) {
+ return ((value + 0x8000) >> 32) & 0xffff;
+}
+
+static inline uint16_t applyPPChighest(uint64_t value) {
return (value >> 48) & 0xffff;
}
+static inline uint16_t applyPPChighesta(uint64_t value) {
+ return ((value + 0x8000) >> 48) & 0xffff;
+}
+
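The new "adjusted" helpers (applyPPCha and friends) pre-add 0x8000 so that when the hardware later sign-extends the matching low 16 bits, the original value is reconstructed. A standalone check of that identity for the #lo/#ha pair, with the two helpers copied verbatim:

    #include <cassert>
    #include <cstdint>

    static uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
    static uint16_t applyPPCha(uint64_t value) {
      return ((value + 0x8000) >> 16) & 0xffff;
    }

    int main() {
      // addis/addi-style materialization sign-extends the low half, so the high
      // half must be adjusted upward whenever bit 15 of the value is set.
      uint32_t Value = 0x12348765; // bit 15 set: plain #hi would come up short
      int64_t Rebuilt = ((int64_t)applyPPCha(Value) << 16) +
                        (int16_t)applyPPClo(Value);
      assert(Rebuilt == Value);
      return 0;
    }
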
void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend) {
- uint8_t* LocalAddress = Section.Address + Offset;
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
+ uint8_t *LocalAddress = Section.Address + Offset;
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
- break;
- case ELF::R_PPC64_ADDR16_LO :
- writeInt16BE(LocalAddress, applyPPClo (Value + Addend));
break;
- case ELF::R_PPC64_ADDR16_HI :
- writeInt16BE(LocalAddress, applyPPChi (Value + Addend));
+ case ELF::R_PPC64_ADDR16:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
break;
- case ELF::R_PPC64_ADDR16_HIGHER :
- writeInt16BE(LocalAddress, applyPPChigher (Value + Addend));
+ case ELF::R_PPC64_ADDR16_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
break;
- case ELF::R_PPC64_ADDR16_HIGHEST :
- writeInt16BE(LocalAddress, applyPPChighest (Value + Addend));
+ case ELF::R_PPC64_ADDR16_LO:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
break;
- case ELF::R_PPC64_ADDR14 : {
+ case ELF::R_PPC64_ADDR16_LO_DS:
+ writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+ break;
+ case ELF::R_PPC64_ADDR16_HI:
+ writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HA:
+ writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHER:
+ writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHERA:
+ writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHEST:
+ writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR16_HIGHESTA:
+ writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
+ break;
+ case ELF::R_PPC64_ADDR14: {
assert(((Value + Addend) & 3) == 0);
// Preserve the AA/LK bits in the branch instruction
- uint8_t aalk = *(LocalAddress+3);
+ uint8_t aalk = *(LocalAddress + 3);
writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
} break;
- case ELF::R_PPC64_ADDR32 : {
+ case ELF::R_PPC64_REL16_LO: {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPClo(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HI: {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPChi(Delta));
+ } break;
+ case ELF::R_PPC64_REL16_HA: {
+ uint64_t FinalAddress = (Section.LoadAddress + Offset);
+ uint64_t Delta = Value - FinalAddress + Addend;
+ writeInt16BE(LocalAddress, applyPPCha(Delta));
+ } break;
+ case ELF::R_PPC64_ADDR32: {
int32_t Result = static_cast<int32_t>(Value + Addend);
if (SignExtend32<32>(Result) != Result)
llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
writeInt32BE(LocalAddress, Result);
} break;
- case ELF::R_PPC64_REL24 : {
+ case ELF::R_PPC64_REL24: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
if (SignExtend32<24>(delta) != delta)
@@ -712,7 +818,7 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
// Generates a 'bl <address>' instruction
writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
} break;
- case ELF::R_PPC64_REL32 : {
+ case ELF::R_PPC64_REL32: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
if (SignExtend32<32>(delta) != delta)
@@ -724,30 +830,15 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
uint64_t Delta = Value - FinalAddress + Addend;
writeInt64BE(LocalAddress, Delta);
} break;
- case ELF::R_PPC64_ADDR64 :
+ case ELF::R_PPC64_ADDR64:
writeInt64BE(LocalAddress, Value + Addend);
break;
- case ELF::R_PPC64_TOC :
- writeInt64BE(LocalAddress, findPPC64TOC());
- break;
- case ELF::R_PPC64_TOC16 : {
- uint64_t TOCStart = findPPC64TOC();
- Value = applyPPClo((Value + Addend) - TOCStart);
- writeInt16BE(LocalAddress, applyPPClo(Value));
- } break;
- case ELF::R_PPC64_TOC16_DS : {
- uint64_t TOCStart = findPPC64TOC();
- Value = ((Value + Addend) - TOCStart);
- writeInt16BE(LocalAddress, applyPPClo(Value));
- } break;
}
}
void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend) {
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend) {
uint8_t *LocalAddress = Section.Address + Offset;
switch (Type) {
default:
@@ -807,66 +898,63 @@ void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
}
void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
uint64_t SymOffset) {
switch (Arch) {
case Triple::x86_64:
resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
break;
case Triple::x86:
- resolveX86Relocation(Section, Offset,
- (uint32_t)(Value & 0xffffffffL), Type,
+ resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::arm64:
+ case Triple::arm64_be:
resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
break;
- case Triple::arm: // Fall through.
+ case Triple::arm: // Fall through.
+ case Triple::armeb:
case Triple::thumb:
- resolveARMRelocation(Section, Offset,
- (uint32_t)(Value & 0xffffffffL), Type,
+ case Triple::thumbeb:
+ resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
- case Triple::mips: // Fall through.
+ case Triple::mips: // Fall through.
case Triple::mipsel:
- resolveMIPSRelocation(Section, Offset,
- (uint32_t)(Value & 0xffffffffL), Type,
- (uint32_t)(Addend & 0xffffffffL));
+ resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
+ Type, (uint32_t)(Addend & 0xffffffffL));
break;
- case Triple::ppc64: // Fall through.
+ case Triple::ppc64: // Fall through.
case Triple::ppc64le:
resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
break;
case Triple::systemz:
resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
break;
- default: llvm_unreachable("Unsupported CPU type!");
+ default:
+ llvm_unreachable("Unsupported CPU type!");
}
}
-void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs) {
+relocation_iterator RuntimeDyldELF::processRelocationRef(
+ unsigned SectionID, relocation_iterator RelI, ObjectImage &Obj,
+ ObjSectionToIDMap &ObjSectionToID, const SymbolTableMap &Symbols,
+ StubMap &Stubs) {
uint64_t RelType;
- Check(RelI.getType(RelType));
+ Check(RelI->getType(RelType));
int64_t Addend;
- Check(getELFRelocationAddend(RelI, Addend));
- symbol_iterator Symbol = RelI.getSymbol();
+ Check(getELFRelocationAddend(*RelI, Addend));
+ symbol_iterator Symbol = RelI->getSymbol();
// Obtain the symbol name which is referenced in the relocation
StringRef TargetName;
if (Symbol != Obj.end_symbols())
Symbol->getName(TargetName);
- DEBUG(dbgs() << "\t\tRelType: " << RelType
- << " Addend: " << Addend
- << " TargetName: " << TargetName
- << "\n");
+ DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
RelocationValueRef Value;
// First search for the symbol in the local symbol table
SymbolTableMap::const_iterator lsi = Symbols.end();
@@ -890,53 +978,49 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
Value.Addend = gsi->second.second + Addend;
} else {
switch (SymType) {
- case SymbolRef::ST_Debug: {
- // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
- // and can be changed by another developers. Maybe best way is add
- // a new symbol type ST_Section to SymbolRef and use it.
- section_iterator si(Obj.end_sections());
- Symbol->getSection(si);
- if (si == Obj.end_sections())
- llvm_unreachable("Symbol section not found, bad object file format!");
- DEBUG(dbgs() << "\t\tThis is section symbol\n");
- // Default to 'true' in case isText fails (though it never does).
- bool isCode = true;
- si->isText(isCode);
- Value.SectionID = findOrEmitSection(Obj,
- (*si),
- isCode,
- ObjSectionToID);
- Value.Addend = Addend;
- break;
- }
- case SymbolRef::ST_Data:
- case SymbolRef::ST_Unknown: {
- Value.SymbolName = TargetName.data();
- Value.Addend = Addend;
-
- // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
- // will manifest here as a NULL symbol name.
- // We can set this as a valid (but empty) symbol name, and rely
- // on addRelocationForSymbol to handle this.
- if (!Value.SymbolName)
- Value.SymbolName = "";
- break;
- }
- default:
- llvm_unreachable("Unresolved symbol type!");
- break;
+ case SymbolRef::ST_Debug: {
+        // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION; this is not obvious
+        // and could be changed by other developers. Maybe the best way is to
+        // add a new symbol type ST_Section to SymbolRef and use it.
+ section_iterator si(Obj.end_sections());
+ Symbol->getSection(si);
+ if (si == Obj.end_sections())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ // Default to 'true' in case isText fails (though it never does).
+ bool isCode = true;
+ si->isText(isCode);
+ Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Data:
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+
+ // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
+ // will manifest here as a NULL symbol name.
+ // We can set this as a valid (but empty) symbol name, and rely
+ // on addRelocationForSymbol to handle this.
+ if (!Value.SymbolName)
+ Value.SymbolName = "";
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
}
}
}
uint64_t Offset;
- Check(RelI.getOffset(Offset));
+ Check(RelI->getOffset(Offset));
- DEBUG(dbgs() << "\t\tSectionID: " << SectionID
- << " Offset: " << Offset
+ DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
<< "\n");
- if (Arch == Triple::aarch64 &&
- (RelType == ELF::R_AARCH64_CALL26 ||
- RelType == ELF::R_AARCH64_JUMP26)) {
+ if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+ Arch == Triple::arm64 || Arch == Triple::arm64_be) &&
+ (RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
// This is an AArch64 branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
SectionEntry &Section = Sections[SectionID];
@@ -944,24 +1028,21 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + i->second, RelType, 0);
+ resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
+ RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
- RelocationEntry REmovz_g3(SectionID,
- StubTargetAddr - Section.Address,
+ RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address,
ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
- RelocationEntry REmovk_g2(SectionID,
- StubTargetAddr - Section.Address + 4,
+ RelocationEntry REmovk_g2(SectionID, StubTargetAddr - Section.Address + 4,
ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
- RelocationEntry REmovk_g1(SectionID,
- StubTargetAddr - Section.Address + 8,
+ RelocationEntry REmovk_g1(SectionID, StubTargetAddr - Section.Address + 8,
ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
RelocationEntry REmovk_g0(SectionID,
StubTargetAddr - Section.Address + 12,
@@ -979,14 +1060,13 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
addRelocationForSection(REmovk_g0, Value.SectionID);
}
resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + Section.StubOffset,
- RelType, 0);
+ (uint64_t)Section.Address + Section.StubOffset, RelType,
+ 0);
Section.StubOffset += getMaxStubSize();
}
} else if (Arch == Triple::arm &&
- (RelType == ELF::R_ARM_PC24 ||
- RelType == ELF::R_ARM_CALL ||
- RelType == ELF::R_ARM_JUMP24)) {
+ (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24)) {
// This is an ARM branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
SectionEntry &Section = Sections[SectionID];
@@ -994,15 +1074,15 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + i->second, RelType, 0);
+ resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
+ RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
ELF::R_ARM_PRIVATE_0, Value.Addend);
if (Value.SymbolName)
@@ -1011,8 +1091,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
addRelocationForSection(RE, Value.SectionID);
resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + Section.StubOffset,
- RelType, 0);
+ (uint64_t)Section.Address + Section.StubOffset, RelType,
+ 0);
Section.StubOffset += getMaxStubSize();
}
} else if ((Arch == Triple::mipsel || Arch == Triple::mips) &&
@@ -1038,15 +1118,13 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
// Creating Hi and Lo relocations for the filled stub instructions.
- RelocationEntry REHi(SectionID,
- StubTargetAddr - Section.Address,
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address,
ELF::R_MIPS_UNUSED1, Value.Addend);
- RelocationEntry RELo(SectionID,
- StubTargetAddr - Section.Address + 4,
+ RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4,
ELF::R_MIPS_UNUSED2, Value.Addend);
if (Value.SymbolName) {
@@ -1063,6 +1141,10 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
}
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
if (RelType == ELF::R_PPC64_REL24) {
+ // Determine ABI variant in use for this object.
+ unsigned AbiVariant;
+ Obj.getObjectFile()->getPlatformFlags(AbiVariant);
+ AbiVariant &= ELF::EF_PPC64_ABI;
// A PPC branch relocation will need a stub function if the target is
// an external symbol (Symbol::ST_Unknown) or if the target address
// is not within the signed 24-bits branch address.
@@ -1070,9 +1152,18 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
uint8_t *Target = Section.Address + Offset;
bool RangeOverflow = false;
if (SymType != SymbolRef::ST_Unknown) {
- // A function call may points to the .opd entry, so the final symbol value
- // in calculated based in the relocation values in .opd section.
- findOPDEntrySection(Obj, ObjSectionToID, Value);
+ if (AbiVariant != 2) {
+ // In the ELFv1 ABI, a function call may point to the .opd entry,
+ // so the final symbol value is calculated based on the relocation
+ // values in the .opd section.
+ findOPDEntrySection(Obj, ObjSectionToID, Value);
+ } else {
+ // In the ELFv2 ABI, a function symbol may provide a local entry
+ // point, which must be used for direct calls.
+ uint8_t SymOther;
+ Symbol->getOther(SymOther);
+ Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
+ }
uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
int32_t delta = static_cast<int32_t>(Target - RelocTarget);
// If it is within 24-bits branch range, just set the branch target
@@ -1099,53 +1190,100 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset,
+ AbiVariant);
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
ELF::R_PPC64_ADDR64, Value.Addend);
// Generates the 64-bits address loads as exemplified in section
- // 4.5.1 in PPC64 ELF ABI.
- RelocationEntry REhst(SectionID,
- StubTargetAddr - Section.Address + 2,
+ // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
+ // apply to the low part of the instructions, so we have to update
+ // the offset according to the target endianness.
+ uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
+ if (!IsTargetLittleEndian)
+ StubRelocOffset += 2;
+
+ RelocationEntry REhst(SectionID, StubRelocOffset + 0,
ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
- RelocationEntry REhr(SectionID,
- StubTargetAddr - Section.Address + 6,
+ RelocationEntry REhr(SectionID, StubRelocOffset + 4,
ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
- RelocationEntry REh(SectionID,
- StubTargetAddr - Section.Address + 14,
+ RelocationEntry REh(SectionID, StubRelocOffset + 12,
ELF::R_PPC64_ADDR16_HI, Value.Addend);
- RelocationEntry REl(SectionID,
- StubTargetAddr - Section.Address + 18,
+ RelocationEntry REl(SectionID, StubRelocOffset + 16,
ELF::R_PPC64_ADDR16_LO, Value.Addend);
if (Value.SymbolName) {
addRelocationForSymbol(REhst, Value.SymbolName);
- addRelocationForSymbol(REhr, Value.SymbolName);
- addRelocationForSymbol(REh, Value.SymbolName);
- addRelocationForSymbol(REl, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
} else {
addRelocationForSection(REhst, Value.SectionID);
- addRelocationForSection(REhr, Value.SectionID);
- addRelocationForSection(REh, Value.SectionID);
- addRelocationForSection(REl, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
}
resolveRelocation(Section, Offset,
(uint64_t)Section.Address + Section.StubOffset,
RelType, 0);
- if (SymType == SymbolRef::ST_Unknown)
- // Restore the TOC for external calls
- writeInt32BE(Target+4, 0xE8410028); // ld r2,40(r1)
Section.StubOffset += getMaxStubSize();
}
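The endianness adjustment applied to StubRelocOffset above exists because the 16-bit immediates live in the low half of each 4-byte PPC instruction word; where that half sits in memory depends on the target byte order. A standalone illustration (the instruction word is an arbitrary lis-style encoding chosen for the example):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Insn = 0x3C60ABCD; // opcode/register in the high half, imm low
      uint8_t BE[4] = {uint8_t(Insn >> 24), uint8_t(Insn >> 16),
                       uint8_t(Insn >> 8), uint8_t(Insn)};
      uint8_t LE[4] = {uint8_t(Insn), uint8_t(Insn >> 8),
                       uint8_t(Insn >> 16), uint8_t(Insn >> 24)};
      // Big-endian: the immediate starts at byte offset 2, hence the "+ 2"
      // added to StubRelocOffset above; little-endian: it starts at offset 0.
      assert(BE[2] == 0xAB && BE[3] == 0xCD);
      assert(LE[0] == 0xCD && LE[1] == 0xAB);
      return 0;
    }
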
+ if (SymType == SymbolRef::ST_Unknown) {
+ // Restore the TOC for external calls
+ if (AbiVariant == 2)
+          writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
+ else
+ writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
+ }
}
+ } else if (RelType == ELF::R_PPC64_TOC16 ||
+ RelType == ELF::R_PPC64_TOC16_DS ||
+ RelType == ELF::R_PPC64_TOC16_LO ||
+ RelType == ELF::R_PPC64_TOC16_LO_DS ||
+ RelType == ELF::R_PPC64_TOC16_HI ||
+ RelType == ELF::R_PPC64_TOC16_HA) {
+ // These relocations are supposed to subtract the TOC address from
+ // the final value. This does not fit cleanly into the RuntimeDyld
+ // scheme, since there may be *two* sections involved in determining
+      // the relocation value (the section of the symbol referred to by the
+ // relocation, and the TOC section associated with the current module).
+ //
+ // Fortunately, these relocations are currently only ever generated
+      // referring to symbols that themselves reside in the TOC, which means
+ // that the two sections are actually the same. Thus they cancel out
+ // and we can immediately resolve the relocation right now.
+ switch (RelType) {
+ case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
+ case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
+ case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
+ case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
+ case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
+ case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
+ default: llvm_unreachable("Wrong relocation type.");
+ }
+
+ RelocationValueRef TOCValue;
+ findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
+ if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
+ llvm_unreachable("Unsupported TOC relocation.");
+ Value.Addend -= TOCValue.Addend;
+ resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
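The cancellation argument in the comment above can be spelled out: when the referenced symbol lives in the TOC section itself, the unknown section load address appears in both the symbol value and the TOC base, so it drops out and the relocation can be resolved on the spot. A standalone check with made-up numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t LoadAddr = 0x7f0000000; // arbitrary; must drop out of the result
      int64_t SymOffsetInTOC = 0x120, Addend = 8, TOCBias = 0x8000;
      int64_t S = LoadAddr + SymOffsetInTOC; // symbol value
      int64_t TOCBase = LoadAddr + TOCBias;  // section load address + 0x8000 bias
      // (S + A) - TOC depends only on section-relative quantities.
      assert((S + Addend) - TOCBase == SymOffsetInTOC + Addend - TOCBias);
      return 0;
    }
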
} else {
+ // There are two ways to refer to the TOC address directly: either
+      // via an ELF::R_PPC64_TOC relocation (where both symbol and addend are
+      // ignored), or via any relocation that refers to the magic ".TOC."
+      // symbol (in which case the addend is respected).
+ if (RelType == ELF::R_PPC64_TOC) {
+ RelType = ELF::R_PPC64_ADDR64;
+ findPPC64TOCSection(Obj, ObjSectionToID, Value);
+ } else if (TargetName == ".TOC.") {
+ findPPC64TOCSection(Obj, ObjSectionToID, Value);
+ Value.Addend += Addend;
+ }
+
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
- // Extra check to avoid relocation againt empty symbols (usually
- // the R_PPC64_TOC).
- if (SymType != SymbolRef::ST_Unknown && TargetName.empty())
- Value.SymbolName = NULL;
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
@@ -1153,8 +1291,7 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
addRelocationForSection(RE, Value.SectionID);
}
} else if (Arch == Triple::systemz &&
- (RelType == ELF::R_390_PLT32DBL ||
- RelType == ELF::R_390_GOTENT)) {
+ (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
// Create function stubs for both PLT and GOT references, regardless of
// whether the GOT reference is to data or code. The stub contains the
// full address of the symbol, as needed by GOT references, and the
@@ -1179,14 +1316,14 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
- StubAddress = (BaseAddress + Section.StubOffset +
- StubAlignment - 1) & -StubAlignment;
+ StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
createStubFunction((uint8_t *)StubAddress);
- RelocationEntry RE(SectionID, StubOffset + 8,
- ELF::R_390_64, Value.Addend - Addend);
+ RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
+ Value.Addend - Addend);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
@@ -1195,15 +1332,17 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
}
if (RelType == ELF::R_390_GOTENT)
- resolveRelocation(Section, Offset, StubAddress + 8,
- ELF::R_390_PC32DBL, Addend);
+ resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
+ Addend);
else
resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
} else if (Arch == Triple::x86_64 && RelType == ELF::R_X86_64_PLT32) {
- // The way the PLT relocations normally work is that the linker allocates the
+ // The way the PLT relocations normally work is that the linker allocates
+ // the
// PLT and this relocation makes a PC-relative call into the PLT. The PLT
- // entry will then jump to an address provided by the GOT. On first call, the
- // GOT address will point back into PLT code that resolves the symbol. After
+ // entry will then jump to an address provided by the GOT. On first call,
+ // the
+ // GOT address will point back into PLT code that resolves the symbol. After
// the first call, the GOT entry points to the actual function.
//
// For local functions we're ignoring all of that here and just replacing
@@ -1229,8 +1368,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
- StubAddress = (BaseAddress + Section.StubOffset +
- StubAlignment - 1) & -StubAlignment;
+ StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
createStubFunction((uint8_t *)StubAddress);
@@ -1239,8 +1378,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
GOTEntries.push_back(Value);
// Make our stub function a relative call to the GOT entry.
- RelocationEntry RE(SectionID, StubOffset + 2,
- ELF::R_X86_64_GOTPCREL, -4);
+ RelocationEntry RE(SectionID, StubOffset + 2, ELF::R_X86_64_GOTPCREL,
+ -4);
addRelocationForSymbol(RE, Value.SymbolName);
// Bump our stub offset counter
@@ -1248,8 +1387,8 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
}
// Make the target call a call into the stub table.
- resolveRelocation(Section, Offset, StubAddress,
- ELF::R_X86_64_PC32, Addend);
+ resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
+ Addend);
} else {
RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
Value.Offset);
@@ -1265,17 +1404,19 @@ void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
else
addRelocationForSection(RE, Value.SectionID);
}
+ return ++RelI;
}
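For the x86_64 PLT case above, the call site ends up with an ordinary R_X86_64_PC32 displacement that points at the local stub rather than at a PLT entry. A standalone sketch of that displacement arithmetic, using invented addresses:

    // Sketch of R_X86_64_PC32: result = S + A - P, where P is the address of
    // the 32-bit field being patched and A is typically -4 for call operands.
    #include <cassert>
    #include <cstdint>
    int main() {
      uint64_t StubAddress = 0x400800; // hypothetical stub address (S)
      uint64_t FieldAddr   = 0x400104; // hypothetical rel32 field address (P)
      int64_t  Addend      = -4;       // call rel32 is relative to the next insn
      int32_t  Rel32 = (int32_t)(StubAddress + Addend - FieldAddr);
      assert(FieldAddr + 4 + Rel32 == StubAddress); // next insn + disp hits the stub
      return 0;
    }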
void RuntimeDyldELF::updateGOTEntries(StringRef Name, uint64_t Addr) {
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::iterator it;
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::iterator end = GOTs.end();
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::iterator it;
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::iterator end = GOTs.end();
for (it = GOTs.begin(); it != end; ++it) {
GOTRelocations &GOTEntries = it->second;
for (int i = 0, e = GOTEntries.size(); i != e; ++i) {
- if (GOTEntries[i].SymbolName != 0 && GOTEntries[i].SymbolName == Name) {
+ if (GOTEntries[i].SymbolName != nullptr &&
+ GOTEntries[i].SymbolName == Name) {
GOTEntries[i].Offset = Addr;
}
}
@@ -1289,6 +1430,9 @@ size_t RuntimeDyldELF::getGOTEntrySize() {
switch (Arch) {
case Triple::x86_64:
case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::arm64:
+ case Triple::arm64_be:
case Triple::ppc64:
case Triple::ppc64le:
case Triple::systemz:
@@ -1301,18 +1445,19 @@ size_t RuntimeDyldELF::getGOTEntrySize() {
case Triple::mipsel:
Result = sizeof(uint32_t);
break;
- default: llvm_unreachable("Unsupported CPU type!");
+ default:
+ llvm_unreachable("Unsupported CPU type!");
}
return Result;
}
-uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
- uint64_t Offset) {
+uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress, uint64_t Offset) {
const size_t GOTEntrySize = getGOTEntrySize();
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::const_iterator it;
- SmallVectorImpl<std::pair<SID, GOTRelocations> >::const_iterator end = GOTs.end();
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::const_iterator it;
+ SmallVectorImpl<std::pair<SID, GOTRelocations>>::const_iterator end =
+ GOTs.end();
int GOTIndex = -1;
for (it = GOTs.begin(); it != end; ++it) {
@@ -1322,7 +1467,7 @@ uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
// Find the matching entry in our vector.
uint64_t SymbolOffset = 0;
for (int i = 0, e = GOTEntries.size(); i != e; ++i) {
- if (GOTEntries[i].SymbolName == 0) {
+ if (!GOTEntries[i].SymbolName) {
if (getSectionLoadAddress(GOTEntries[i].SectionID) == LoadAddress &&
GOTEntries[i].Offset == Offset) {
GOTIndex = i;
@@ -1342,11 +1487,11 @@ uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
if (GOTIndex != -1) {
if (GOTEntrySize == sizeof(uint64_t)) {
- uint64_t *LocalGOTAddr = (uint64_t*)getSectionAddress(GOTSectionID);
+ uint64_t *LocalGOTAddr = (uint64_t *)getSectionAddress(GOTSectionID);
// Fill in this entry with the address of the symbol being referenced.
LocalGOTAddr[GOTIndex] = LoadAddress + SymbolOffset;
} else {
- uint32_t *LocalGOTAddr = (uint32_t*)getSectionAddress(GOTSectionID);
+ uint32_t *LocalGOTAddr = (uint32_t *)getSectionAddress(GOTSectionID);
// Fill in this entry with the address of the symbol being referenced.
LocalGOTAddr[GOTIndex] = (uint32_t)(LoadAddress + SymbolOffset);
}
@@ -1360,7 +1505,8 @@ uint64_t RuntimeDyldELF::findGOTEntry(uint64_t LoadAddress,
return 0;
}
-void RuntimeDyldELF::finalizeLoad(ObjSectionToIDMap &SectionMap) {
+void RuntimeDyldELF::finalizeLoad(ObjectImage &ObjImg,
+ ObjSectionToIDMap &SectionMap) {
// If necessary, allocate the global offset table
if (MemMgr) {
// Allocate the GOT if necessary
@@ -1380,8 +1526,7 @@ void RuntimeDyldELF::finalizeLoad(ObjSectionToIDMap &SectionMap) {
// needed when GOT-based relocations are applied.
memset(Addr, 0, TotalSize);
}
- }
- else {
+ } else {
report_fatal_error("Unable to allocate memory for GOT!");
}
@@ -1401,6 +1546,12 @@ void RuntimeDyldELF::finalizeLoad(ObjSectionToIDMap &SectionMap) {
bool RuntimeDyldELF::isCompatibleFormat(const ObjectBuffer *Buffer) const {
if (Buffer->getBufferSize() < strlen(ELF::ElfMagic))
return false;
- return (memcmp(Buffer->getBufferStart(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ return (memcmp(Buffer->getBufferStart(), ELF::ElfMagic,
+ strlen(ELF::ElfMagic))) == 0;
+}
+
+bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile *Obj) const {
+ return Obj->isELF();
}
+
} // namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index 3adf827..59fdfbe 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -20,70 +20,46 @@
using namespace llvm;
namespace llvm {
-
namespace {
- // Helper for extensive error checking in debug builds.
- error_code Check(error_code Err) {
- if (Err) {
- report_fatal_error(Err.message());
- }
- return Err;
+// Helper for extensive error checking in debug builds.
+std::error_code Check(std::error_code Err) {
+ if (Err) {
+ report_fatal_error(Err.message());
}
+ return Err;
+}
} // end anonymous namespace
class RuntimeDyldELF : public RuntimeDyldImpl {
- void resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
- uint64_t SymOffset=0);
-
- void resolveX86_64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
+ void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset = 0);
+
+ void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
uint64_t SymOffset);
- void resolveX86Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend);
-
- void resolveAArch64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend);
-
- void resolveARMRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend);
-
- void resolveMIPSRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint32_t Value,
- uint32_t Type,
- int32_t Addend);
-
- void resolvePPC64Relocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend);
-
- void resolveSystemZRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend);
-
- unsigned getMaxStubSize() {
- if (Arch == Triple::aarch64)
+ void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveMIPSRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ unsigned getMaxStubSize() override {
+ if (Arch == Triple::aarch64 || Arch == Triple::arm64 ||
+ Arch == Triple::aarch64_be || Arch == Triple::arm64_be)
return 20; // movz; movk; movk; movk; br
if (Arch == Triple::arm || Arch == Triple::thumb)
return 8; // 32-bit instruction and 32-bit address
@@ -99,24 +75,24 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
return 0;
}
- unsigned getStubAlignment() {
+ unsigned getStubAlignment() override {
if (Arch == Triple::systemz)
return 8;
else
return 1;
}
- uint64_t findPPC64TOC() const;
- void findOPDEntrySection(ObjectImage &Obj,
- ObjSectionToIDMap &LocalSections,
+ void findPPC64TOCSection(ObjectImage &Obj, ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+ void findOPDEntrySection(ObjectImage &Obj, ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel);
uint64_t findGOTEntry(uint64_t LoadAddr, uint64_t Offset);
size_t getGOTEntrySize();
- virtual void updateGOTEntries(StringRef Name, uint64_t Addr);
+ void updateGOTEntries(StringRef Name, uint64_t Addr) override;
- // Relocation entries for symbols whose position-independant offset is
+ // Relocation entries for symbols whose position-independent offset is
// updated in a global offset table.
typedef SmallVector<RelocationValueRef, 2> GOTRelocations;
GOTRelocations GOTEntries; // List of entries requiring finalization.
@@ -129,22 +105,23 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
SmallVector<SID, 2> RegisteredEHFrameSections;
public:
- RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm)
- {}
-
- virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value);
- virtual void processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs);
- virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
- virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
- virtual void registerEHFrames();
- virtual void deregisterEHFrames();
- virtual void finalizeLoad(ObjSectionToIDMap &SectionMap);
+ RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override;
+ bool isCompatibleFormat(const ObjectBuffer *Buffer) const override;
+ bool isCompatibleFile(const object::ObjectFile *Buffer) const override;
+ void registerEHFrames() override;
+ void deregisterEHFrames() override;
+ void finalizeLoad(ObjectImage &ObjImg,
+ ObjSectionToIDMap &SectionMap) override;
virtual ~RuntimeDyldELF();
+
+ static ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+ static ObjectImage *createObjectImageFromFile(std::unique_ptr<object::ObjectFile> Obj);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 3014b30..0211d2b 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -28,8 +29,8 @@
#include "llvm/Support/Mutex.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/system_error.h"
#include <map>
+#include <system_error>
using namespace llvm;
using namespace llvm::object;
@@ -39,7 +40,6 @@ namespace llvm {
class ObjectBuffer;
class Twine;
-
/// SectionEntry - represents a section emitted into memory by the dynamic
/// linker.
class SectionEntry {
@@ -69,8 +69,9 @@ public:
SectionEntry(StringRef name, uint8_t *address, size_t size,
uintptr_t objAddress)
- : Name(name), Address(address), Size(size), LoadAddress((uintptr_t)address),
- StubOffset(size), ObjAddress(objAddress) {}
+ : Name(name), Address(address), Size(size),
+ LoadAddress((uintptr_t)address), StubOffset(size),
+ ObjAddress(objAddress) {}
};
/// RelocationEntry - used to represent relocations internally in the dynamic
@@ -90,9 +91,17 @@ public:
/// used to make a relocation section relative instead of symbol relative.
int64_t Addend;
+ struct SectionPair {
+ uint32_t SectionA;
+ uint32_t SectionB;
+ };
+
/// SymOffset - Section offset of the relocation entry's symbol (used for GOT
/// lookup).
- uint64_t SymOffset;
+ union {
+ uint64_t SymOffset;
+ SectionPair Sections;
+ };
/// True if this is a PCRel relocation (MachO specific).
bool IsPCRel;
@@ -101,33 +110,44 @@ public:
unsigned Size;
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
- : SectionID(id), Offset(offset), RelType(type), Addend(addend),
- SymOffset(0), IsPCRel(false), Size(0) {}
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(false), Size(0) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
uint64_t symoffset)
- : SectionID(id), Offset(offset), RelType(type), Addend(addend),
- SymOffset(symoffset), IsPCRel(false), Size(0) {}
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(symoffset), IsPCRel(false), Size(0) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
bool IsPCRel, unsigned Size)
- : SectionID(id), Offset(offset), RelType(type), Addend(addend),
- SymOffset(0), IsPCRel(IsPCRel), Size(Size) {}
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(IsPCRel), Size(Size) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
+ : SectionID(id), Offset(offset), RelType(type),
+ Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
+ Size(Size) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
};
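The new section-difference constructor above folds both section offsets into Addend up front, so resolving later only needs the difference of the two sections' load addresses. A standalone sketch with invented numbers:

    // Sketch: pre-folding the offsets is equivalent to computing
    // (LoadA + OffA) - (LoadB + OffB) + addend at resolve time.
    #include <cassert>
    #include <cstdint>
    int main() {
      uint64_t SectionAOffset = 0x40, SectionBOffset = 0x10; // hypothetical offsets
      int64_t addend = 8;                                    // original addend
      int64_t Addend = (int64_t)(SectionAOffset - SectionBOffset) + addend;
      uint64_t LoadA = 0x2000, LoadB = 0x1000;               // hypothetical load addresses
      int64_t Result = (int64_t)(LoadA - LoadB) + Addend;
      assert(Result == (int64_t)((LoadA + SectionAOffset) -
                                 (LoadB + SectionBOffset)) + addend);
      return 0;
    }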
class RelocationValueRef {
public:
- unsigned SectionID;
- uint64_t Offset;
- int64_t Addend;
+ unsigned SectionID;
+ uint64_t Offset;
+ int64_t Addend;
const char *SymbolName;
- RelocationValueRef(): SectionID(0), Offset(0), Addend(0), SymbolName(0) {}
+ RelocationValueRef() : SectionID(0), Offset(0), Addend(0),
+ SymbolName(nullptr) {}
inline bool operator==(const RelocationValueRef &Other) const {
return SectionID == Other.SectionID && Offset == Other.Offset &&
Addend == Other.Addend && SymbolName == Other.SymbolName;
}
- inline bool operator <(const RelocationValueRef &Other) const {
+ inline bool operator<(const RelocationValueRef &Other) const {
if (SectionID != Other.SectionID)
return SectionID < Other.SectionID;
if (Offset != Other.Offset)
@@ -139,6 +159,15 @@ public:
};
class RuntimeDyldImpl {
+ friend class RuntimeDyldChecker;
+private:
+
+ uint64_t getAnySymbolRemoteAddress(StringRef Symbol) {
+ if (uint64_t InternalSymbolAddr = getSymbolLoadAddress(Symbol))
+ return InternalSymbolAddr;
+ return MemMgr->getSymbolAddress(Symbol);
+ }
+
protected:
// The MemoryManager to load objects into.
RTDyldMemoryManager *MemMgr;
@@ -149,7 +178,7 @@ protected:
SectionList Sections;
typedef unsigned SID; // Type for SectionIDs
- #define RTDYLD_INVALID_SECTION_ID ((SID)(-1))
+#define RTDYLD_INVALID_SECTION_ID ((SID)(-1))
// Keep a map of sections from object file to the SectionID which
// references it.
@@ -187,6 +216,10 @@ protected:
Triple::ArchType Arch;
bool IsTargetLittleEndian;
+ // True if all sections should be passed to the memory manager, false if only
+ // sections containing relocations should be. Defaults to 'false'.
+ bool ProcessAllSections;
+
// This mutex prevents simultaneously loading objects from two different
// threads. This keeps us from having to protect individual data structures
// and guarantees that section allocation requests to the memory manager
@@ -217,52 +250,49 @@ protected:
}
uint8_t *getSectionAddress(unsigned SectionID) {
- return (uint8_t*)Sections[SectionID].Address;
+ return (uint8_t *)Sections[SectionID].Address;
}
void writeInt16BE(uint8_t *Addr, uint16_t Value) {
if (IsTargetLittleEndian)
- Value = sys::SwapByteOrder(Value);
- *Addr = (Value >> 8) & 0xFF;
- *(Addr+1) = Value & 0xFF;
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 8) & 0xFF;
+ *(Addr + 1) = Value & 0xFF;
}
void writeInt32BE(uint8_t *Addr, uint32_t Value) {
if (IsTargetLittleEndian)
- Value = sys::SwapByteOrder(Value);
- *Addr = (Value >> 24) & 0xFF;
- *(Addr+1) = (Value >> 16) & 0xFF;
- *(Addr+2) = (Value >> 8) & 0xFF;
- *(Addr+3) = Value & 0xFF;
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 24) & 0xFF;
+ *(Addr + 1) = (Value >> 16) & 0xFF;
+ *(Addr + 2) = (Value >> 8) & 0xFF;
+ *(Addr + 3) = Value & 0xFF;
}
void writeInt64BE(uint8_t *Addr, uint64_t Value) {
if (IsTargetLittleEndian)
- Value = sys::SwapByteOrder(Value);
- *Addr = (Value >> 56) & 0xFF;
- *(Addr+1) = (Value >> 48) & 0xFF;
- *(Addr+2) = (Value >> 40) & 0xFF;
- *(Addr+3) = (Value >> 32) & 0xFF;
- *(Addr+4) = (Value >> 24) & 0xFF;
- *(Addr+5) = (Value >> 16) & 0xFF;
- *(Addr+6) = (Value >> 8) & 0xFF;
- *(Addr+7) = Value & 0xFF;
+ sys::swapByteOrder(Value);
+ *Addr = (Value >> 56) & 0xFF;
+ *(Addr + 1) = (Value >> 48) & 0xFF;
+ *(Addr + 2) = (Value >> 40) & 0xFF;
+ *(Addr + 3) = (Value >> 32) & 0xFF;
+ *(Addr + 4) = (Value >> 24) & 0xFF;
+ *(Addr + 5) = (Value >> 16) & 0xFF;
+ *(Addr + 6) = (Value >> 8) & 0xFF;
+ *(Addr + 7) = Value & 0xFF;
}
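The writeInt*BE helpers above emit the most significant byte first, after swapping when the target is little-endian (which effectively produces target byte order). A tiny standalone check of the no-swap, big-endian path:

    // Sketch of the big-endian path of writeInt32BE (no target byte swap).
    #include <cassert>
    #include <cstdint>
    static void writeBE32(uint8_t *Addr, uint32_t Value) {
      Addr[0] = (Value >> 24) & 0xFF;
      Addr[1] = (Value >> 16) & 0xFF;
      Addr[2] = (Value >> 8) & 0xFF;
      Addr[3] = Value & 0xFF;
    }
    int main() {
      uint8_t Buf[4];
      writeBE32(Buf, 0xE8410028); // the "ld r2,40(r1)" encoding used earlier
      assert(Buf[0] == 0xE8 && Buf[1] == 0x41 && Buf[2] == 0x00 && Buf[3] == 0x28);
      return 0;
    }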
/// \brief Given the common symbols discovered in the object file, emit a
/// new section for them and update the symbol mappings in the object and
/// symbol table.
- void emitCommonSymbols(ObjectImage &Obj,
- const CommonSymbolMap &CommonSymbols,
- uint64_t TotalSize,
- SymbolTableMap &SymbolTable);
+ void emitCommonSymbols(ObjectImage &Obj, const CommonSymbolMap &CommonSymbols,
+ uint64_t TotalSize, SymbolTableMap &SymbolTable);
/// \brief Emits section data from the object file to the MemoryManager.
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emits, else allocateDataSection() will be used.
/// \return SectionID.
- unsigned emitSection(ObjectImage &Obj,
- const SectionRef &Section,
+ unsigned emitSection(ObjectImage &Obj, const SectionRef &Section,
bool IsCode);
/// \brief Find Section in LocalSections. If the section is not found - emit
@@ -270,10 +300,8 @@ protected:
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emits, else allocateDataSection() will be used.
/// \return SectionID.
- unsigned findOrEmitSection(ObjectImage &Obj,
- const SectionRef &Section,
- bool IsCode,
- ObjSectionToIDMap &LocalSections);
+ unsigned findOrEmitSection(ObjectImage &Obj, const SectionRef &Section,
+ bool IsCode, ObjSectionToIDMap &LocalSections);
// \brief Add a relocation entry that uses the given section.
void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
@@ -284,7 +312,7 @@ protected:
/// \brief Emits long jump instruction to Addr.
/// \return Pointer to the memory area for emitting target address.
- uint8_t* createStubFunction(uint8_t *Addr);
+ uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
/// \brief Resolves relocations from Relocs list with address from Value.
void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
@@ -294,14 +322,14 @@ protected:
/// \param Value Target symbol address to apply the relocation action
virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
- /// \brief Parses the object file relocation and stores it to Relocations
- /// or SymbolRelocations (this depends on the object file type).
- virtual void processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs) = 0;
+ /// \brief Parses one or more object file relocations (some object files use
+ /// relocation pairs) and stores it to Relocations or SymbolRelocations
+ /// (this depends on the object file type).
+ /// \return Iterator to the next relocation that needs to be parsed.
+ virtual relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) = 0;
/// \brief Resolve relocations to external symbols.
void resolveExternalSymbols();
@@ -310,20 +338,34 @@ protected:
// The base class does nothing. ELF overrides this.
virtual void updateGOTEntries(StringRef Name, uint64_t Addr) {}
- virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+ // \brief Compute an upper bound of the memory that is required to load all
+ // sections
+ void computeTotalAllocSize(ObjectImage &Obj, uint64_t &CodeSize,
+ uint64_t &DataSizeRO, uint64_t &DataSizeRW);
+
+ // \brief Compute the stub buffer size required for a section
+ unsigned computeSectionStubBufSize(ObjectImage &Obj,
+ const SectionRef &Section);
+
public:
- RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {}
+ RuntimeDyldImpl(RTDyldMemoryManager *mm)
+ : MemMgr(mm), ProcessAllSections(false), HasError(false) {
+ }
virtual ~RuntimeDyldImpl();
- ObjectImage *loadObject(ObjectBuffer *InputBuffer);
+ void setProcessAllSections(bool ProcessAllSections) {
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ ObjectImage *loadObject(ObjectImage *InputObject);
- void *getSymbolAddress(StringRef Name) {
+ uint8_t* getSymbolAddress(StringRef Name) {
// FIXME: Just look up as a function for now. Overly simple of course.
// Work in progress.
SymbolTableMap::const_iterator pos = GlobalSymbolTable.find(Name);
if (pos == GlobalSymbolTable.end())
- return 0;
+ return nullptr;
SymbolLoc Loc = pos->second;
return getSectionAddress(Loc.first) + Loc.second;
}
@@ -354,15 +396,15 @@ public:
StringRef getErrorString() { return ErrorStr; }
virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const = 0;
+ virtual bool isCompatibleFile(const ObjectFile *Obj) const = 0;
virtual void registerEHFrames();
virtual void deregisterEHFrames();
- virtual void finalizeLoad(ObjSectionToIDMap &SectionMap) {}
+ virtual void finalizeLoad(ObjectImage &ObjImg, ObjSectionToIDMap &SectionMap) {}
};
} // end namespace llvm
-
#endif
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index 5b92867..58fb515 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -11,28 +11,148 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dyld"
#include "RuntimeDyldMachO.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+
+#include "Targets/RuntimeDyldMachOARM.h"
+#include "Targets/RuntimeDyldMachOAArch64.h"
+#include "Targets/RuntimeDyldMachOI386.h"
+#include "Targets/RuntimeDyldMachOX86_64.h"
+
using namespace llvm;
using namespace llvm::object;
+#define DEBUG_TYPE "dyld"
+
namespace llvm {
-static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText, intptr_t DeltaForEH) {
- uint32_t Length = *((uint32_t*)P);
+uint64_t RuntimeDyldMachO::decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ uint32_t RelType) const {
+ uint64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+ return Addend;
+}
+
+RelocationValueRef RuntimeDyldMachO::getRelocationValueRef(
+ ObjectImage &ObjImg, const relocation_iterator &RI,
+ const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols) {
+
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+ RelocationValueRef Value;
+
+ bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
+ if (IsExternal) {
+ symbol_iterator Symbol = RI->getSymbol();
+ StringRef TargetName;
+ Symbol->getName(TargetName);
+ SymbolTableMap::const_iterator SI = Symbols.find(TargetName.data());
+ if (SI != Symbols.end()) {
+ Value.SectionID = SI->second.first;
+ Value.Addend = SI->second.second + RE.Addend;
+ } else {
+ SI = GlobalSymbolTable.find(TargetName.data());
+ if (SI != GlobalSymbolTable.end()) {
+ Value.SectionID = SI->second.first;
+ Value.Addend = SI->second.second + RE.Addend;
+ } else {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = RE.Addend;
+ }
+ }
+ } else {
+ SectionRef Sec = Obj.getRelocationSection(RelInfo);
+ bool IsCode = false;
+ Sec.isText(IsCode);
+ Value.SectionID = findOrEmitSection(ObjImg, Sec, IsCode, ObjSectionToID);
+ uint64_t Addr;
+ Sec.getAddress(Addr);
+ Value.Addend = RE.Addend - Addr;
+ }
+
+ return Value;
+}
+
+void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
+ ObjectImage &ObjImg,
+ const relocation_iterator &RI) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+ if (IsPCRel) {
+ uint64_t RelocAddr = 0;
+ RI->getAddress(RelocAddr);
+ unsigned RelocSize = Obj.getAnyRelocationLength(RelInfo);
+ Value.Addend += RelocAddr + (1ULL << RelocSize);
+ }
+}
+
+void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
+ uint64_t Value) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+
+ dbgs() << "resolveRelocation Section: " << RE.SectionID
+ << " LocalAddress: " << format("%p", LocalAddress)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%p", Value) << " Addend: " << RE.Addend
+ << " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
+ << " Size: " << (1 << RE.Size) << "\n";
+}
+
+bool RuntimeDyldMachO::writeBytesUnaligned(uint8_t *Addr, uint64_t Value,
+ unsigned Size) {
+ for (unsigned i = 0; i < Size; ++i) {
+ *Addr++ = (uint8_t)Value;
+ Value >>= 8;
+ }
+
+ return false;
+}
+
+bool
+RuntimeDyldMachO::isCompatibleFormat(const ObjectBuffer *InputBuffer) const {
+ if (InputBuffer->getBufferSize() < 4)
+ return false;
+ StringRef Magic(InputBuffer->getBufferStart(), 4);
+ if (Magic == "\xFE\xED\xFA\xCE")
+ return true;
+ if (Magic == "\xCE\xFA\xED\xFE")
+ return true;
+ if (Magic == "\xFE\xED\xFA\xCF")
+ return true;
+ if (Magic == "\xCF\xFA\xED\xFE")
+ return true;
+ return false;
+}
+
+bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile *Obj) const {
+ return Obj->isMachO();
+}
+
+static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText,
+ intptr_t DeltaForEH) {
+ DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+ << ", Delta for EH: " << DeltaForEH << "\n");
+ uint32_t Length = *((uint32_t *)P);
P += 4;
unsigned char *Ret = P + Length;
- uint32_t Offset = *((uint32_t*)P);
+ uint32_t Offset = *((uint32_t *)P);
if (Offset == 0) // is a CIE
return Ret;
P += 4;
- intptr_t FDELocation = *((intptr_t*)P);
+ intptr_t FDELocation = *((intptr_t *)P);
intptr_t NewLocation = FDELocation - DeltaForText;
- *((intptr_t*)P) = NewLocation;
+ *((intptr_t *)P) = NewLocation;
P += sizeof(intptr_t);
// Skip the FDE address range
@@ -41,16 +161,16 @@ static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText, intptr
uint8_t Augmentationsize = *P;
P += 1;
if (Augmentationsize != 0) {
- intptr_t LSDA = *((intptr_t*)P);
+ intptr_t LSDA = *((intptr_t *)P);
intptr_t NewLSDA = LSDA - DeltaForEH;
- *((intptr_t*)P) = NewLSDA;
+ *((intptr_t *)P) = NewLSDA;
}
return Ret;
}
static intptr_t computeDelta(SectionEntry *A, SectionEntry *B) {
- intptr_t ObjDistance = A->ObjAddress - B->ObjAddress;
+ intptr_t ObjDistance = A->ObjAddress - B->ObjAddress;
intptr_t MemDistance = A->LoadAddress - B->LoadAddress;
return ObjDistance - MemDistance;
}
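computeDelta above measures how much the spacing between two sections differs between the object image and target memory; the FDE fields are then adjusted by that amount. A numeric sketch with invented addresses:

    // Sketch: delta = (A.ObjAddress - B.ObjAddress) - (A.LoadAddress - B.LoadAddress)
    #include <cassert>
    #include <cstdint>
    int main() {
      intptr_t AObj = 0x3000, BObj = 0x1000;   // addresses in the object image
      intptr_t ALoad = 0xA000, BLoad = 0x9000; // load addresses in target memory
      intptr_t Delta = (AObj - BObj) - (ALoad - BLoad);
      assert(Delta == 0x1000); // sections are 0x1000 closer together once loaded
      return 0;
    }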
@@ -66,7 +186,7 @@ void RuntimeDyldMachO::registerEHFrames() {
continue;
SectionEntry *Text = &Sections[SectionInfo.TextSID];
SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
- SectionEntry *ExceptTab = NULL;
+ SectionEntry *ExceptTab = nullptr;
if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
ExceptTab = &Sections[SectionInfo.ExceptTabSID];
@@ -77,382 +197,27 @@ void RuntimeDyldMachO::registerEHFrames() {
unsigned char *P = EHFrame->Address;
unsigned char *End = P + EHFrame->Size;
- do {
+ do {
P = processFDE(P, DeltaForText, DeltaForEH);
- } while(P != End);
+ } while (P != End);
- MemMgr->registerEHFrames(EHFrame->Address,
- EHFrame->LoadAddress,
+ MemMgr->registerEHFrames(EHFrame->Address, EHFrame->LoadAddress,
EHFrame->Size);
}
UnregisteredEHFrameSections.clear();
}
-void RuntimeDyldMachO::finalizeLoad(ObjSectionToIDMap &SectionMap) {
- unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
- unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
- unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
- ObjSectionToIDMap::iterator i, e;
- for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
- const SectionRef &Section = i->first;
- StringRef Name;
- Section.getName(Name);
- if (Name == "__eh_frame")
- EHFrameSID = i->second;
- else if (Name == "__text")
- TextSID = i->second;
- else if (Name == "__gcc_except_tab")
- ExceptTabSID = i->second;
- }
- UnregisteredEHFrameSections.push_back(EHFrameRelatedSections(EHFrameSID,
- TextSID,
- ExceptTabSID));
-}
-
-// The target location for the relocation is described by RE.SectionID and
-// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
-// SectionEntry has three members describing its location.
-// SectionEntry::Address is the address at which the section has been loaded
-// into memory in the current (host) process. SectionEntry::LoadAddress is the
-// address that the section will have in the target process.
-// SectionEntry::ObjAddress is the address of the bits for this section in the
-// original emitted object image (also in the current address space).
-//
-// Relocations will be applied as if the section were loaded at
-// SectionEntry::LoadAddress, but they will be applied at an address based
-// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
-// Target memory contents if they are required for value calculations.
-//
-// The Value parameter here is the load address of the symbol for the
-// relocation to be applied. For relocations which refer to symbols in the
-// current object Value will be the LoadAddress of the section in which
-// the symbol resides (RE.Addend provides additional information about the
-// symbol location). For external symbols, Value will be the address of the
-// symbol in the target address space.
-void RuntimeDyldMachO::resolveRelocation(const RelocationEntry &RE,
- uint64_t Value) {
- const SectionEntry &Section = Sections[RE.SectionID];
- return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
- RE.IsPCRel, RE.Size);
-}
-
-void RuntimeDyldMachO::resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
- bool isPCRel,
- unsigned LogSize) {
- uint8_t *LocalAddress = Section.Address + Offset;
- uint64_t FinalAddress = Section.LoadAddress + Offset;
- unsigned MachoType = Type;
- unsigned Size = 1 << LogSize;
-
- DEBUG(dbgs() << "resolveRelocation LocalAddress: "
- << format("%p", LocalAddress)
- << " FinalAddress: " << format("%p", FinalAddress)
- << " Value: " << format("%p", Value)
- << " Addend: " << Addend
- << " isPCRel: " << isPCRel
- << " MachoType: " << MachoType
- << " Size: " << Size
- << "\n");
-
- // This just dispatches to the proper target specific routine.
+std::unique_ptr<RuntimeDyldMachO>
+llvm::RuntimeDyldMachO::create(Triple::ArchType Arch, RTDyldMemoryManager *MM) {
switch (Arch) {
- default: llvm_unreachable("Unsupported CPU type!");
- case Triple::x86_64:
- resolveX86_64Relocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- MachoType,
- Size,
- Addend);
- break;
- case Triple::x86:
- resolveI386Relocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- MachoType,
- Size,
- Addend);
- break;
- case Triple::arm: // Fall through.
- case Triple::thumb:
- resolveARMRelocation(LocalAddress,
- FinalAddress,
- (uintptr_t)Value,
- isPCRel,
- MachoType,
- Size,
- Addend);
- break;
- }
-}
-
-bool RuntimeDyldMachO::resolveI386Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
- if (isPCRel)
- Value -= FinalAddress + 4; // see resolveX86_64Relocation
-
- switch (Type) {
- default:
- llvm_unreachable("Invalid relocation type!");
- case MachO::GENERIC_RELOC_VANILLA: {
- uint8_t *p = LocalAddress;
- uint64_t ValueToWrite = Value + Addend;
- for (unsigned i = 0; i < Size; ++i) {
- *p++ = (uint8_t)(ValueToWrite & 0xff);
- ValueToWrite >>= 8;
- }
- return false;
- }
- case MachO::GENERIC_RELOC_SECTDIFF:
- case MachO::GENERIC_RELOC_LOCAL_SECTDIFF:
- case MachO::GENERIC_RELOC_PB_LA_PTR:
- return Error("Relocation type not implemented yet!");
- }
-}
-
-bool RuntimeDyldMachO::resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
- // If the relocation is PC-relative, the value to be encoded is the
- // pointer difference.
- if (isPCRel)
- // FIXME: It seems this value needs to be adjusted by 4 for an effective PC
- // address. Is that expected? Only for branches, perhaps?
- Value -= FinalAddress + 4;
-
- switch(Type) {
- default:
- llvm_unreachable("Invalid relocation type!");
- case MachO::X86_64_RELOC_SIGNED_1:
- case MachO::X86_64_RELOC_SIGNED_2:
- case MachO::X86_64_RELOC_SIGNED_4:
- case MachO::X86_64_RELOC_SIGNED:
- case MachO::X86_64_RELOC_UNSIGNED:
- case MachO::X86_64_RELOC_BRANCH: {
- Value += Addend;
- // Mask in the target value a byte at a time (we don't have an alignment
- // guarantee for the target address, so this is safest).
- uint8_t *p = (uint8_t*)LocalAddress;
- for (unsigned i = 0; i < Size; ++i) {
- *p++ = (uint8_t)Value;
- Value >>= 8;
- }
- return false;
- }
- case MachO::X86_64_RELOC_GOT_LOAD:
- case MachO::X86_64_RELOC_GOT:
- case MachO::X86_64_RELOC_SUBTRACTOR:
- case MachO::X86_64_RELOC_TLV:
- return Error("Relocation type not implemented yet!");
- }
-}
-
-bool RuntimeDyldMachO::resolveARMRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend) {
- // If the relocation is PC-relative, the value to be encoded is the
- // pointer difference.
- if (isPCRel) {
- Value -= FinalAddress;
- // ARM PCRel relocations have an effective-PC offset of two instructions
- // (four bytes in Thumb mode, 8 bytes in ARM mode).
- // FIXME: For now, assume ARM mode.
- Value -= 8;
- }
-
- switch(Type) {
default:
- llvm_unreachable("Invalid relocation type!");
- case MachO::ARM_RELOC_VANILLA: {
- // Mask in the target value a byte at a time (we don't have an alignment
- // guarantee for the target address, so this is safest).
- uint8_t *p = (uint8_t*)LocalAddress;
- for (unsigned i = 0; i < Size; ++i) {
- *p++ = (uint8_t)Value;
- Value >>= 8;
- }
+ llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
break;
+ case Triple::arm: return make_unique<RuntimeDyldMachOARM>(MM);
+ case Triple::arm64: return make_unique<RuntimeDyldMachOAArch64>(MM);
+ case Triple::x86: return make_unique<RuntimeDyldMachOI386>(MM);
+ case Triple::x86_64: return make_unique<RuntimeDyldMachOX86_64>(MM);
}
- case MachO::ARM_RELOC_BR24: {
- // Mask the value into the target address. We know instructions are
- // 32-bit aligned, so we can do it all at once.
- uint32_t *p = (uint32_t*)LocalAddress;
- // The low two bits of the value are not encoded.
- Value >>= 2;
- // Mask the value to 24 bits.
- Value &= 0xffffff;
- // FIXME: If the destination is a Thumb function (and the instruction
- // is a non-predicated BL instruction), we need to change it to a BLX
- // instruction instead.
-
- // Insert the value into the instruction.
- *p = (*p & ~0xffffff) | Value;
- break;
- }
- case MachO::ARM_THUMB_RELOC_BR22:
- case MachO::ARM_THUMB_32BIT_BRANCH:
- case MachO::ARM_RELOC_HALF:
- case MachO::ARM_RELOC_HALF_SECTDIFF:
- case MachO::ARM_RELOC_PAIR:
- case MachO::ARM_RELOC_SECTDIFF:
- case MachO::ARM_RELOC_LOCAL_SECTDIFF:
- case MachO::ARM_RELOC_PB_LA_PTR:
- return Error("Relocation type not implemented yet!");
- }
- return false;
-}
-
-void RuntimeDyldMachO::processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs) {
- const ObjectFile *OF = Obj.getObjectFile();
- const MachOObjectFile *MachO = static_cast<const MachOObjectFile*>(OF);
- MachO::any_relocation_info RE= MachO->getRelocation(RelI.getRawDataRefImpl());
-
- uint32_t RelType = MachO->getAnyRelocationType(RE);
-
- // FIXME: Properly handle scattered relocations.
- // For now, optimistically skip these: they can often be ignored, as
- // the static linker will already have applied the relocation, and it
- // only needs to be reapplied if symbols move relative to one another.
- // Note: This will fail horribly where the relocations *do* need to be
- // applied, but that was already the case.
- if (MachO->isRelocationScattered(RE))
- return;
-
- RelocationValueRef Value;
- SectionEntry &Section = Sections[SectionID];
-
- bool isExtern = MachO->getPlainRelocationExternal(RE);
- bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
- unsigned Size = MachO->getAnyRelocationLength(RE);
- uint64_t Offset;
- RelI.getOffset(Offset);
- uint8_t *LocalAddress = Section.Address + Offset;
- unsigned NumBytes = 1 << Size;
- uint64_t Addend = 0;
- memcpy(&Addend, LocalAddress, NumBytes);
-
- if (isExtern) {
- // Obtain the symbol name which is referenced in the relocation
- symbol_iterator Symbol = RelI.getSymbol();
- StringRef TargetName;
- Symbol->getName(TargetName);
- // First search for the symbol in the local symbol table
- SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
- if (lsi != Symbols.end()) {
- Value.SectionID = lsi->second.first;
- Value.Addend = lsi->second.second + Addend;
- } else {
- // Search for the symbol in the global symbol table
- SymbolTableMap::const_iterator gsi = GlobalSymbolTable.find(TargetName.data());
- if (gsi != GlobalSymbolTable.end()) {
- Value.SectionID = gsi->second.first;
- Value.Addend = gsi->second.second + Addend;
- } else {
- Value.SymbolName = TargetName.data();
- Value.Addend = Addend;
- }
- }
- } else {
- SectionRef Sec = MachO->getRelocationSection(RE);
- Value.SectionID = findOrEmitSection(Obj, Sec, true, ObjSectionToID);
- uint64_t Addr;
- Sec.getAddress(Addr);
- Value.Addend = Addend - Addr;
- }
-
- if (Arch == Triple::x86_64 && (RelType == MachO::X86_64_RELOC_GOT ||
- RelType == MachO::X86_64_RELOC_GOT_LOAD)) {
- assert(IsPCRel);
- assert(Size == 2);
- StubMap::const_iterator i = Stubs.find(Value);
- uint8_t *Addr;
- if (i != Stubs.end()) {
- Addr = Section.Address + i->second;
- } else {
- Stubs[Value] = Section.StubOffset;
- uint8_t *GOTEntry = Section.Address + Section.StubOffset;
- RelocationEntry RE(SectionID, Section.StubOffset,
- MachO::X86_64_RELOC_UNSIGNED, 0, false, 3);
- if (Value.SymbolName)
- addRelocationForSymbol(RE, Value.SymbolName);
- else
- addRelocationForSection(RE, Value.SectionID);
- Section.StubOffset += 8;
- Addr = GOTEntry;
- }
- resolveRelocation(Section, Offset, (uint64_t)Addr,
- MachO::X86_64_RELOC_UNSIGNED, Value.Addend, true, 2);
- } else if (Arch == Triple::arm &&
- (RelType & 0xf) == MachO::ARM_RELOC_BR24) {
- // This is an ARM branch relocation, need to use a stub function.
-
- // Look up for existing stub.
- StubMap::const_iterator i = Stubs.find(Value);
- if (i != Stubs.end())
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + i->second,
- RelType, 0, IsPCRel, Size);
- else {
- // Create a new stub function.
- Stubs[Value] = Section.StubOffset;
- uint8_t *StubTargetAddr = createStubFunction(Section.Address +
- Section.StubOffset);
- RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
- MachO::GENERIC_RELOC_VANILLA, Value.Addend);
- if (Value.SymbolName)
- addRelocationForSymbol(RE, Value.SymbolName);
- else
- addRelocationForSection(RE, Value.SectionID);
- resolveRelocation(Section, Offset,
- (uint64_t)Section.Address + Section.StubOffset,
- RelType, 0, IsPCRel, Size);
- Section.StubOffset += getMaxStubSize();
- }
- } else {
- RelocationEntry RE(SectionID, Offset, RelType, Value.Addend,
- IsPCRel, Size);
- if (Value.SymbolName)
- addRelocationForSymbol(RE, Value.SymbolName);
- else
- addRelocationForSection(RE, Value.SectionID);
- }
-}
-
-
-bool RuntimeDyldMachO::isCompatibleFormat(
- const ObjectBuffer *InputBuffer) const {
- if (InputBuffer->getBufferSize() < 4)
- return false;
- StringRef Magic(InputBuffer->getBufferStart(), 4);
- if (Magic == "\xFE\xED\xFA\xCE") return true;
- if (Magic == "\xCE\xFA\xED\xFE") return true;
- if (Magic == "\xFE\xED\xFA\xCF") return true;
- if (Magic == "\xCF\xFA\xED\xFE") return true;
- return false;
}
} // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
index bbf6aa9..7d1dc02 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -14,66 +14,31 @@
#ifndef LLVM_RUNTIME_DYLD_MACHO_H
#define LLVM_RUNTIME_DYLD_MACHO_H
+#include "ObjectImageCommon.h"
#include "RuntimeDyldImpl.h"
-#include "llvm/ADT/IndexedMap.h"
#include "llvm/Object/MachO.h"
#include "llvm/Support/Format.h"
+#define DEBUG_TYPE "dyld"
+
using namespace llvm;
using namespace llvm::object;
-
namespace llvm {
class RuntimeDyldMachO : public RuntimeDyldImpl {
- bool resolveI386Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend);
- bool resolveX86_64Relocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend);
- bool resolveARMRelocation(uint8_t *LocalAddress,
- uint64_t FinalAddress,
- uint64_t Value,
- bool isPCRel,
- unsigned Type,
- unsigned Size,
- int64_t Addend);
-
- void resolveRelocation(const SectionEntry &Section,
- uint64_t Offset,
- uint64_t Value,
- uint32_t Type,
- int64_t Addend,
- bool isPCRel,
- unsigned Size);
-
- unsigned getMaxStubSize() {
- if (Arch == Triple::arm || Arch == Triple::thumb)
- return 8; // 32-bit instruction and 32-bit address
- else if (Arch == Triple::x86_64)
- return 8; // GOT entry
- else
- return 0;
- }
-
- unsigned getStubAlignment() {
- return 1;
- }
+protected:
+ struct SectionOffsetPair {
+ unsigned SectionID;
+ uint64_t Offset;
+ };
struct EHFrameRelatedSections {
- EHFrameRelatedSections() : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
- TextSID(RTDYLD_INVALID_SECTION_ID),
- ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
- EHFrameRelatedSections(SID EH, SID T, SID Ex)
- : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
+ EHFrameRelatedSections()
+ : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
+ TextSID(RTDYLD_INVALID_SECTION_ID),
+ ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
+ EHFrameRelatedSections(SID EH, SID T, SID Ex)
+ : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
SID EHFrameSID;
SID TextSID;
SID ExceptTabSID;
@@ -83,21 +48,130 @@ class RuntimeDyldMachO : public RuntimeDyldImpl {
// in a table until we receive a request to register all unregistered
// EH frame sections with the memory manager.
SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
-public:
+
RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
- virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value);
- virtual void processRelocationRef(unsigned SectionID,
- RelocationRef RelI,
- ObjectImage &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- const SymbolTableMap &Symbols,
- StubMap &Stubs);
- virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
- virtual void registerEHFrames();
- virtual void finalizeLoad(ObjSectionToIDMap &SectionMap);
+ /// Extract the addend encoded in the instruction.
+ uint64_t decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ uint32_t RelType) const;
+
+ /// Construct a RelocationValueRef representing the relocation target.
+ /// For Symbols in known sections, this will return a RelocationValueRef
+ /// representing a (SectionID, Offset) pair.
+ /// For Symbols whose section is not known, this will return a
+ /// (SymbolName, Offset) pair, where the Offset is taken from the instruction
+ /// immediate (held in RE.Addend).
+ /// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
+ /// should be done by the caller where appropriate by calling makePCRel on
+ /// the RelocationValueRef.
+ RelocationValueRef getRelocationValueRef(ObjectImage &ObjImg,
+ const relocation_iterator &RI,
+ const RelocationEntry &RE,
+ ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols);
+
+ /// Make the RelocationValueRef addend PC-relative.
+ void makeValueAddendPCRel(RelocationValueRef &Value, ObjectImage &ObjImg,
+ const relocation_iterator &RI);
+
+ /// Dump information about the relocation entry (RE) and resolved value.
+ void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
+
+public:
+ /// Create an ObjectImage from the given ObjectBuffer.
+ static ObjectImage *createObjectImage(ObjectBuffer *InputBuffer) {
+ return new ObjectImageCommon(InputBuffer);
+ }
+
+ /// Create an ObjectImage from the given ObjectFile.
+ static ObjectImage *
+ createObjectImageFromFile(std::unique_ptr<object::ObjectFile> InputObject) {
+ return new ObjectImageCommon(std::move(InputObject));
+ }
+
+ /// Create a RuntimeDyldMachO instance for the given target architecture.
+ static std::unique_ptr<RuntimeDyldMachO> create(Triple::ArchType Arch,
+ RTDyldMemoryManager *mm);
+
+ /// Write the least significant 'Size' bytes in 'Value' out at the address
+ /// pointed to by Addr. Check for overflow.
+ bool writeBytesUnaligned(uint8_t *Addr, uint64_t Value, unsigned Size);
+
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+ bool isCompatibleFormat(const ObjectBuffer *Buffer) const override;
+ bool isCompatibleFile(const object::ObjectFile *Obj) const override;
+ void registerEHFrames() override;
+};
+
+/// RuntimeDyldMachOTarget - Templated base class for generic MachO linker
+/// algorithms and data structures.
+///
+/// Concrete, target specific sub-classes can be accessed via the impl()
+/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
+/// Recurring Template Idiom). Concrete subclasses for each target
+/// can be found in ./Targets.
+template <typename Impl>
+class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
+private:
+ Impl &impl() { return static_cast<Impl &>(*this); }
+ const Impl &impl() const { return static_cast<const Impl &>(*this); }
+
+protected:
+
+ /// Parse the given relocation, which must be a non-scattered, and
+ /// return a RelocationEntry representing the information. The 'Addend' field
+ /// will contain the unmodified instruction immediate.
+ RelocationEntry getBasicRelocationEntry(unsigned SectionID,
+ ObjectImage &ObjImg,
+ const relocation_iterator &RI) const {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RI->getRawDataRefImpl());
+
+ const SectionEntry &Section = Sections[SectionID];
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+ unsigned Size = Obj.getAnyRelocationLength(RelInfo);
+ uint64_t Offset;
+ RI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+ uint64_t Addend = impl().decodeAddend(LocalAddress, NumBytes, RelType);
+
+ return RelocationEntry(SectionID, Offset, RelType, Addend, IsPCRel, Size);
+ }
+
+public:
+ RuntimeDyldMachOCRTPBase(RTDyldMemoryManager *mm) : RuntimeDyldMachO(mm) {}
+
+ void finalizeLoad(ObjectImage &ObjImg, ObjSectionToIDMap &SectionMap) {
+ unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
+ unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
+ ObjSectionToIDMap::iterator i, e;
+
+ for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
+ const SectionRef &Section = i->first;
+ StringRef Name;
+ Section.getName(Name);
+ if (Name == "__eh_frame")
+ EHFrameSID = i->second;
+ else if (Name == "__text")
+ TextSID = i->second;
+ else if (Name == "__gcc_except_tab")
+ ExceptTabSID = i->second;
+ else
+ impl().finalizeSection(ObjImg, i->second, Section);
+ }
+ UnregisteredEHFrameSections.push_back(
+ EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
+ }
};
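The CRTP arrangement described in the comment above lets RuntimeDyldMachOCRTPBase call into the concrete target class without virtual dispatch. A minimal standalone sketch of the pattern, with illustrative names that are not part of the patch:

    // Sketch of the Curiously Recurring Template Pattern used here.
    #include <cstdio>
    template <typename Impl> class DyldBase {
      Impl &impl() { return static_cast<Impl &>(*this); }
    public:
      void finalize() { impl().finalizeSection(); } // resolved at compile time
    };
    class DyldAArch64 : public DyldBase<DyldAArch64> {
    public:
      void finalizeSection() { std::printf("AArch64-specific finalization\n"); }
    };
    int main() {
      DyldAArch64 D;
      D.finalize(); // calls DyldAArch64::finalizeSection through the base
      return 0;
    }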
} // end namespace llvm
+#undef DEBUG_TYPE
+
#endif
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 0000000..775ed9e
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,255 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+ RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ assert(!Obj.isRelocationScattered(RelInfo) && "");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ bool HasExplicitAddend = false;
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ HasExplicitAddend = true;
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit to 64-bit.
+ ExplicitAddend = (RawAddend << 40) >> 40;
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ if (HasExplicitAddend) {
+ RE.Addend = ExplicitAddend;
+ Value.Addend = ExplicitAddend;
+ }
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ RE.Addend = Value.Addend;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
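The ARM64_RELOC_ADDEND handling above recovers a signed 24-bit payload with a shift pair. A standalone sketch of that sign extension (it relies on arithmetic right shift of int64_t, as the patch itself does):

    // Sketch: sign-extend a 24-bit two's-complement payload to 64 bits.
    #include <cassert>
    #include <cstdint>
    int main() {
      int64_t RawAddend = 0xFFFFF8;                // 24-bit encoding of -8 (hypothetical)
      int64_t Extended = (RawAddend << 40) >> 40;  // the same shift pair as above
      assert(Extended == -8);
      return 0;
    }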
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend = *p & 0x03FFFFFF;
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("branch26 instruction has embedded addend.");
+ else
+ llvm_unreachable("branch26 instruction has embedded addend and"
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ assert(isInt<26>(PCRelVal) && "Branch target out of range!");
+ // Insert the value into the instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend =
+ ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("adrp instruction has embedded addend.");
+ else
+ llvm_unreachable("adrp instruction has embedded addend and"
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ uint64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ // Check that the value fits into 21 bits (+ 12 lower bits).
+ assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
+ // Insert the value into the instruction.
+ uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
+ uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend = *p & 0x003FFC00;
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("adrp instruction has embedded addend.");
+ else
+ llvm_unreachable("adrp instruction has embedded addend and"
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ // Check which instruction we are updating to obtain the implicit shift
+ // factor from LDR/STR instructions.
+ if (*p & 0x08000000) {
+ uint32_t ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+          // Check if this is a vector op.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Value & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+        case 1:
+          assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+          break;
+        case 2:
+          assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+          break;
+        case 3:
+          assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+          break;
+        }
+ // Compensate for implicit shift.
+ Value >>= ImplicitShift;
+ }
+ // Insert the value into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not implemented yet!");
+ case MachO::ARM64_RELOC_ADDEND:
+ llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
+ "processRelocationRef!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert(RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end())
+ Addr = Section.Address + i->second;
+ else {
+      // FIXME: There must be a better way to do this than to check and fix the
+ // alignment every time!!!
+ uintptr_t BaseAddress = uintptr_t(Section.Address);
+ uintptr_t StubAlignment = getStubAlignment();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(((StubAddress % getStubAlignment()) == 0) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.StubOffset = StubOffset + getMaxStubSize();
+ Addr = (uint8_t *)StubAddress;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H
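The ARM64_RELOC_PAGE21 case above splits a signed page delta across the adrp immediate fields: immlo lands in bits 30:29 and immhi in bits 23:5, which is what the 0x60000000 and 0x00FFFFE0 masks select. The standalone C++ sketch below is illustrative only and not part of the patch; encodeADRP, decodeADRPPages and signExtend24 are hypothetical names, and the addresses in main are made up. It reproduces the same arithmetic, including the 24-bit sign extension used for ARM64_RELOC_ADDEND.

// Illustrative sketch only; assumes the bit layout shown in
// resolveRelocation above. Not part of the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Sign-extend a 24-bit value to 64 bits, equivalent to the
// (RawAddend << 40) >> 40 expression used for ARM64_RELOC_ADDEND.
static int64_t signExtend24(uint64_t V) {
  V &= 0xFFFFFF;
  return (int64_t)(V ^ 0x800000) - 0x800000;
}

// Patch an adrp instruction at InsnAddr so it materializes the 4 KiB page
// of Target: immlo goes in bits 30:29, immhi in bits 23:5.
static uint32_t encodeADRP(uint32_t Insn, uint64_t InsnAddr, uint64_t Target) {
  int64_t PageDelta =
      (int64_t)(Target & ~0xFFFULL) - (int64_t)(InsnAddr & ~0xFFFULL);
  assert(PageDelta >= -(1LL << 32) && PageDelta < (1LL << 32) &&
         "Invalid page reloc value!");
  uint32_t ImmLo = (uint32_t)((uint64_t)PageDelta << 17) & 0x60000000;
  uint32_t ImmHi = (uint32_t)((uint64_t)PageDelta >> 9) & 0x00FFFFE0;
  return (Insn & 0x9F00001F) | ImmHi | ImmLo;
}

// Recover the signed page count from an encoded adrp, for checking.
static int64_t decodeADRPPages(uint32_t Insn) {
  uint64_t Pages = (((Insn >> 5) & 0x7FFFF) << 2) | ((Insn >> 29) & 0x3);
  return (int64_t)((Pages ^ 0x100000) - 0x100000); // sign-extend 21 bits
}

int main() {
  const uint32_t AdrpX0 = 0x90000000; // adrp x0, #0
  uint64_t InsnAddr = 0x100004000ULL; // hypothetical load addresses
  uint64_t Target = 0x10000F123ULL;
  uint32_t Patched = encodeADRP(AdrpX0, InsnAddr, Target);
  printf("patched adrp: 0x%08x (%lld pages away)\n", (unsigned)Patched,
         (long long)decodeADRPPages(Patched));
  printf("sign-extended 0x800001: %lld\n", (long long)signExtend24(0x800001));
  return 0;
}

Encoding a word and decoding it back like this is a quick way to sanity-check the mask constants against the ARMv8 adrp layout.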
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 0000000..1de9942
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,154 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOARM_H
+#define LLVM_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+public:
+ RuntimeDyldMachOARM(RTDyldMemoryManager *MM) : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 4; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return ++++RelI;
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ if ((RE.RelType & 0xf) == MachO::ARM_RELOC_BR24)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+      // (4 bytes in Thumb mode, 8 bytes in ARM mode).
+ // FIXME: For now, assume ARM mode.
+ Value -= 8;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM_RELOC_VANILLA:
+ writeBytesUnaligned(LocalAddress, Value, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // Check for overflow.
+ if (Value != FinalValue) {
+ Error("ARM BR24 relocation out of range.");
+ return;
+ }
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ *p = (*p & ~0xffffff) | FinalValue;
+ break;
+ }
+ case MachO::ARM_THUMB_RELOC_BR22:
+ case MachO::ARM_THUMB_32BIT_BRANCH:
+ case MachO::ARM_RELOC_HALF:
+ case MachO::ARM_RELOC_HALF_SECTDIFF:
+ case MachO::ARM_RELOC_PAIR:
+ case MachO::ARM_RELOC_SECTDIFF:
+ case MachO::ARM_RELOC_LOCAL_SECTDIFF:
+ case MachO::ARM_RELOC_PB_LA_PTR:
+ Error("Relocation type not implemented yet!");
+ return;
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+    // This is an ARM branch relocation; we need to use a stub function.
+    // Look for an existing stub.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.Address + i->second;
+ } else {
+ // Create a new stub function.
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
+ RelocationEntry StubRE(RE.SectionID, StubTargetAddr - Section.Address,
+ MachO::GENERIC_RELOC_VANILLA, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Addr = Section.Address + Section.StubOffset;
+ Section.StubOffset += getMaxStubSize();
+ }
+ RelocationEntry TargetRE(Value.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOARM_H
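The ARM_RELOC_BR24 case above turns a branch fixup into a 24-bit signed word offset: subtract the instruction's load address, subtract the 8-byte ARM-mode PC bias, shift right by two, then splice the result into the low 24 bits of the B/BL encoding. A minimal standalone sketch of that math follows; it is not part of the patch, encodeBranch24 is a hypothetical name, and the addresses are invented.

// Illustrative sketch only; not part of the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Patch the 24-bit signed immediate of an ARM-mode B/BL at InsnAddr so it
// branches to Target, mirroring the -8 effective-PC bias and the >> 2
// word scaling used in resolveRelocation above.
static uint32_t encodeBranch24(uint32_t Insn, uint64_t InsnAddr,
                               uint64_t Target) {
  int64_t Offset = (int64_t)Target - (int64_t)InsnAddr - 8; // ARM PC bias
  assert((Offset & 0x3) == 0 && "branch target not 4-byte aligned");
  Offset >>= 2; // the low two bits are not encoded
  assert(Offset >= -(1 << 23) && Offset < (1 << 23) &&
         "ARM BR24 relocation out of range.");
  return (Insn & ~0xFFFFFFu) | ((uint32_t)Offset & 0xFFFFFF);
}

int main() {
  const uint32_t BL = 0xEB000000; // bl #0, AL condition
  uint32_t Patched =
      encodeBranch24(BL, /*InsnAddr=*/0x2000, /*Target=*/0x1F00);
  printf("patched bl: 0x%08x\n", (unsigned)Patched);
  return 0;
}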
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 0000000..856c6ca
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,315 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOI386_H
+#define LLVM_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOI386
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+ RuntimeDyldMachOI386(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 0; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+ RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+ return processSECTDIFFRelocation(SectionID, RelI, ObjImg,
+ ObjSectionToID);
+ else if (Arch == Triple::x86 && RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processI386ScatteredVANILLA(SectionID, RelI, ObjImg,
+ ObjSectionToID);
+ llvm_unreachable("Unhandled scattered relocation.");
+ }
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ // Addends for external, PC-rel relocations on i386 point back to the zero
+ // offset. Calculate the final offset from the relocation target instead.
+ // This allows us to use the same logic for both external and internal
+ // relocations in resolveI386RelocationRef.
+ // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ // if (IsExtern && RE.IsPCRel) {
+ // uint64_t RelocAddr = 0;
+ // RelI->getAddress(RelocAddr);
+ // Value.Addend += RelocAddr + 4;
+ // }
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ RE.Addend = Value.Addend;
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::GENERIC_RELOC_VANILLA:
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ case MachO::GENERIC_RELOC_SECTDIFF:
+ case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].LoadAddress;
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].LoadAddress;
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(LocalAddress, Value, 1 << RE.Size);
+ break;
+ }
+ case MachO::GENERIC_RELOC_PB_LA_PTR:
+ Error("Relocation type not implemented yet!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ Section.getName(Name);
+
+ if (Name == "__jump_table")
+ populateJumpTable(cast<MachOObjectFile>(*ObjImg.getObjectFile()), Section,
+ SectionID);
+ else if (Name == "__pointers")
+ populatePointersSection(cast<MachOObjectFile>(*ObjImg.getObjectFile()),
+ Section, SectionID);
+ }
+
+private:
+ relocation_iterator
+ processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile *MachO =
+ static_cast<const MachOObjectFile *>(Obj.getObjectFile());
+ MachO::any_relocation_info RE =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO->getAnyRelocationType(RE);
+ bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+ unsigned Size = MachO->getAnyRelocationLength(RE);
+ uint64_t Offset;
+ RelI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ uint32_t AddrA = MachO->getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(*MachO, AddrA);
+ assert(SAI != MachO->section_end() && "Can't find section for address A");
+ uint64_t SectionABase;
+ SAI->getAddress(SectionABase);
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode;
+ SectionA.isText(IsCode);
+ uint32_t SectionAID =
+ findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID);
+
+ uint32_t AddrB = MachO->getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(*MachO, AddrB);
+ assert(SBI != MachO->section_end() && "Can't find section for address B");
+ uint64_t SectionBBase;
+ SBI->getAddress(SectionBBase);
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID =
+ findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID);
+
+ if (Addend != AddrA - AddrB)
+ Error("Unexpected SECTDIFF relocation addend.");
+
+ DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
+ << ", Addend: " << Addend << ", SectionA ID: " << SectionAID
+ << ", SectionAOffset: " << SectionAOffset
+ << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, 0, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ Size);
+
+ addRelocationForSection(R, SectionAID);
+ addRelocationForSection(R, SectionBID);
+
+ return ++RelI;
+ }
+
+ relocation_iterator processI386ScatteredVANILLA(
+ unsigned SectionID, relocation_iterator RelI, ObjectImage &Obj,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile *MachO =
+ static_cast<const MachOObjectFile *>(Obj.getObjectFile());
+ MachO::any_relocation_info RE =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO->getAnyRelocationType(RE);
+ bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+ unsigned Size = MachO->getAnyRelocationLength(RE);
+ uint64_t Offset;
+ RelI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+
+ unsigned SymbolBaseAddr = MachO->getScatteredRelocationValue(RE);
+ section_iterator TargetSI = getSectionByAddress(*MachO, SymbolBaseAddr);
+ assert(TargetSI != MachO->section_end() && "Can't find section for symbol");
+ uint64_t SectionBaseAddr;
+ TargetSI->getAddress(SectionBaseAddr);
+ SectionRef TargetSection = *TargetSI;
+ bool IsCode;
+ TargetSection.isText(IsCode);
+ uint32_t TargetSectionID =
+ findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID);
+
+ Addend -= SectionBaseAddr;
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+
+ addRelocationForSection(R, TargetSectionID);
+
+ return ++RelI;
+ }
+
+ // Populate stubs in __jump_table section.
+ void populateJumpTable(MachOObjectFile &Obj, const SectionRef &JTSection,
+ unsigned JTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "__jump_table section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+ uint32_t JTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ unsigned JTEntrySize = Sec32.reserved2;
+ unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+ uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+ unsigned JTEntryOffset = 0;
+
+ assert((JTSectionSize % JTEntrySize) == 0 &&
+ "Jump-table section does not contain a whole number of stubs?");
+
+ for (unsigned i = 0; i < NumJTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ SI->getName(IndirectSymbolName);
+ uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+ createStubFunction(JTEntryAddr);
+ RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+ MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ JTEntryOffset += JTEntrySize;
+ }
+ }
+
+ // Populate __pointers section.
+ void populatePointersSection(MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "__pointers section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+ uint32_t PTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ const unsigned PTEntrySize = 4;
+ unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+ unsigned PTEntryOffset = 0;
+
+ assert((PTSectionSize % PTEntrySize) == 0 &&
+ "Pointers section does not contain a whole number of stubs?");
+
+ DEBUG(dbgs() << "Populating __pointers, Section ID " << PTSectionID << ", "
+ << NumPTEntries << " entries, " << PTEntrySize
+ << " bytes each:\n");
+
+ for (unsigned i = 0; i < NumPTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ SI->getName(IndirectSymbolName);
+ DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
+ << ", PT offset: " << PTEntryOffset << "\n");
+ RelocationEntry RE(PTSectionID, PTEntryOffset,
+ MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ PTEntryOffset += PTEntrySize;
+ }
+ }
+
+ static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr) {
+ section_iterator SI = Obj.section_begin();
+ section_iterator SE = Obj.section_end();
+
+ for (; SI != SE; ++SI) {
+ uint64_t SAddr, SSize;
+ SI->getAddress(SAddr);
+ SI->getSize(SSize);
+ if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+ return SI;
+ }
+
+ return SE;
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOI386_H
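The SECTDIFF handling above reads the bytes already stored at the fixup site into Addend (the low 1 << Size bytes, with Size 0-3 meaning 1, 2, 4 or 8 bytes) and, at resolve time, writes back the difference of the two sections' load addresses plus the relocation's addend. The sketch below shows only that write-back arithmetic; it is not part of the patch, writeLE and resolveSectDiff are hypothetical helpers, a little-endian host is assumed, and the patch's own writeBytesUnaligned may differ in detail.

// Illustrative sketch only; not part of the patch.
#include <cstdint>
#include <cstdio>
#include <cstring>

// Write the low (1 << Size) bytes of Value at Dst without alignment
// assumptions; the host is assumed to be little-endian.
static void writeLE(uint8_t *Dst, uint64_t Value, unsigned Size) {
  memcpy(Dst, &Value, 1u << Size);
}

// Resolve a SECTDIFF-style fixup: write the difference of the two
// sections' load addresses plus the relocation's addend, as in the
// GENERIC_RELOC_SECTDIFF case above.
static void resolveSectDiff(uint8_t *FixupSite, unsigned Size,
                            uint64_t SectionALoadAddr,
                            uint64_t SectionBLoadAddr, int64_t Addend) {
  uint64_t Value = SectionALoadAddr - SectionBLoadAddr + (uint64_t)Addend;
  writeLE(FixupSite, Value, Size);
}

int main() {
  uint8_t Word[4] = {0, 0, 0, 0};
  // Hypothetical values: section A loaded at 0x5000, section B at 0x9000,
  // and an addend of 0x4010 carried by the relocation entry.
  resolveSectDiff(Word, /*Size=*/2, 0x5000, 0x9000, 0x4010);
  uint32_t Out;
  memcpy(&Out, Word, sizeof(Out));
  printf("resolved SECTDIFF word: 0x%08x\n", (unsigned)Out);
  return 0;
}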
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 0000000..99efe9d
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,132 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOX86_64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+ RuntimeDyldMachOX86_64(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on X86_64");
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+ RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ // FIXME: It seems this value needs to be adjusted by 4 for an effective
+ // PC address. Is that expected? Only for branches, perhaps?
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress + 4;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::X86_64_RELOC_SIGNED_1:
+ case MachO::X86_64_RELOC_SIGNED_2:
+ case MachO::X86_64_RELOC_SIGNED_4:
+ case MachO::X86_64_RELOC_SIGNED:
+ case MachO::X86_64_RELOC_UNSIGNED:
+ case MachO::X86_64_RELOC_BRANCH:
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ case MachO::X86_64_RELOC_GOT_LOAD:
+ case MachO::X86_64_RELOC_GOT:
+ case MachO::X86_64_RELOC_SUBTRACTOR:
+ case MachO::X86_64_RELOC_TLV:
+ Error("Relocation type not implemented yet!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ SectionEntry &Section = Sections[RE.SectionID];
+ assert(RE.IsPCRel);
+ assert(RE.Size == 2);
+ Value.Addend -= RE.Addend;
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.Address + i->second;
+ } else {
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *GOTEntry = Section.Address + Section.StubOffset;
+ RelocationEntry GOTRE(RE.SectionID, Section.StubOffset,
+ MachO::X86_64_RELOC_UNSIGNED, Value.Addend, false,
+ 3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.StubOffset += 8;
+ Addr = GOTEntry;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+ MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOX86_64_H
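processGOTRelocation above emits an 8-byte GOT slot in the stub area (filled via an X86_64_RELOC_UNSIGNED entry with Size 3) and then patches the original load site PC-relatively (Size 2) to reference that slot; resolveRelocation subtracts FinalAddress + 4, i.e. the displacement appears to be measured from the end of the 4-byte displacement field, which the patch's own FIXME also questions. A minimal sketch of that displacement math follows; it is not part of the patch, ripDisp32 is a hypothetical name, and the addresses are made up.

// Illustrative sketch only; not part of the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Compute the 32-bit displacement stored at FixupAddr so that a
// RIP-relative operand refers to GOTSlotAddr. The "+ 4" mirrors the
// FinalAddress + 4 adjustment in resolveRelocation above.
static int32_t ripDisp32(uint64_t FixupAddr, uint64_t GOTSlotAddr,
                         int64_t Addend) {
  int64_t Disp = (int64_t)GOTSlotAddr - (int64_t)(FixupAddr + 4) + Addend;
  assert(Disp >= INT32_MIN && Disp <= INT32_MAX && "GOT slot out of range");
  return (int32_t)Disp;
}

int main() {
  // Hypothetical layout: the displacement field of a GOT-relative load
  // sits at 0x1003 and the 8-byte GOT slot was emitted at 0x2000.
  int32_t Disp = ripDisp32(/*FixupAddr=*/0x1003, /*GOTSlotAddr=*/0x2000,
                           /*Addend=*/0);
  printf("RIP-relative displacement to GOT slot: %d (0x%x)\n", Disp,
         (unsigned)(uint32_t)Disp);
  return 0;
}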
diff --git a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
index 9b7d348..b10d51f 100644
--- a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -47,7 +47,7 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
TheTriple.setTriple(sys::getProcessTriple());
// Adjust the triple to match what the user requested.
- const Target *TheTarget = 0;
+ const Target *TheTarget = nullptr;
if (!MArch.empty()) {
for (TargetRegistry::iterator it = TargetRegistry::begin(),
ie = TargetRegistry::end(); it != ie; ++it) {
@@ -61,7 +61,7 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
if (ErrorStr)
*ErrorStr = "No available targets are compatible with this -march, "
"see -version for the available targets.\n";
- return 0;
+ return nullptr;
}
// Adjust the triple to match (if known), otherwise stick with the
@@ -72,10 +72,10 @@ TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
} else {
std::string Error;
TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
- if (TheTarget == 0) {
+ if (!TheTarget) {
if (ErrorStr)
*ErrorStr = Error;
- return 0;
+ return nullptr;
}
}