Diffstat (limited to 'contrib/llvm/lib/ExecutionEngine')
12 files changed, 1024 insertions, 262 deletions
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 906a3a3..e43ba4f 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -948,7 +948,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
   assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
   const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
 
-  if (sys::isLittleEndianHost()) {
+  if (sys::IsLittleEndianHost) {
     // Little-endian host - the source is ordered from LSB to MSB.  Order the
     // destination from LSB to MSB: Do a straight copy.
     memcpy(Dst, Src, StoreBytes);
@@ -1009,7 +1009,7 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
     break;
   }
 
-  if (sys::isLittleEndianHost() != getDataLayout()->isLittleEndian())
+  if (sys::IsLittleEndianHost != getDataLayout()->isLittleEndian())
     // Host and target are different endian - reverse the stored bytes.
     std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
 }
@@ -1021,7 +1021,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
   uint8_t *Dst = reinterpret_cast<uint8_t *>(
                    const_cast<uint64_t *>(IntVal.getRawData()));
 
-  if (sys::isLittleEndianHost())
+  if (sys::IsLittleEndianHost)
     // Little-endian host - the destination must be ordered from LSB to MSB.
     // The source is ordered from LSB to MSB: Do a straight copy.
     memcpy(Dst, Src, LoadBytes);
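The three hunks above only swap the removed sys::isLittleEndianHost() call for the new sys::IsLittleEndianHost constant; the byte-ordering logic itself is unchanged. As a reading aid, a minimal standalone sketch of the copy-then-reverse scheme those functions implement (storeForTarget is a made-up name, not part of the patch):

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    // Write a host uint64_t into target memory in the target's byte order,
    // mirroring StoreValueToMemory: plain copy, then reverse on mismatch.
    static void storeForTarget(uint64_t HostVal, uint8_t *Dst,
                               bool HostIsLittle, bool TargetIsLittle) {
      std::memcpy(Dst, &HostVal, sizeof(HostVal)); // bytes in host order
      if (HostIsLittle != TargetIsLittle)          // cross-endian JIT target
        std::reverse(Dst, Dst + sizeof(HostVal));  // flip to target order
    }

LoadIntFromMemory applies the same host-endianness test in the opposite direction.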
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index f4e8246..f9b08a0 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -15,11 +15,33 @@
 #include "llvm-c/ExecutionEngine.h"
 #include "llvm/ExecutionEngine/ExecutionEngine.h"
 #include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
 #include "llvm/Support/ErrorHandling.h"
 #include <cstring>
 
 using namespace llvm;
 
+// Wrapping the C bindings types.
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
+
+inline DataLayout *unwrap(LLVMTargetDataRef P) {
+  return reinterpret_cast<DataLayout*>(P);
+}
+
+inline LLVMTargetDataRef wrap(const DataLayout *P) {
+  return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
+}
+
+inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
+  return reinterpret_cast<TargetLibraryInfo*>(P);
+}
+
+inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfo *P) {
+  TargetLibraryInfo *X = const_cast<TargetLibraryInfo*>(P);
+  return reinterpret_cast<LLVMTargetLibraryInfoRef>(X);
+}
+
 /*===-- Operations on generic values --------------------------------------===*/
 
 LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
@@ -132,6 +154,59 @@ LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
   return 1;
 }
 
+void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions,
+                                        size_t SizeOfPassedOptions) {
+  LLVMMCJITCompilerOptions options;
+  options.OptLevel = 0;
+  options.CodeModel = LLVMCodeModelJITDefault;
+  options.NoFramePointerElim = false;
+  options.EnableFastISel = false;
+
+  memcpy(PassedOptions, &options,
+         std::min(sizeof(options), SizeOfPassedOptions));
+}
+
+LLVMBool LLVMCreateMCJITCompilerForModule(
+    LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
+    LLVMMCJITCompilerOptions *PassedOptions, size_t SizeOfPassedOptions,
+    char **OutError) {
+  LLVMMCJITCompilerOptions options;
+  // If the user passed a larger sized options struct, then they were compiled
+  // against a newer LLVM. Tell them that something is wrong.
+  if (SizeOfPassedOptions > sizeof(options)) {
+    *OutError = strdup(
+      "Refusing to use options struct that is larger than my own; assuming "
+      "LLVM library mismatch.");
+    return 1;
+  }
+
+  // Defend against the user having an old version of the API by ensuring that
+  // any fields they didn't see are cleared. We must defend against fields
+  // being set to the bitwise equivalent of zero, and assume that this means
+  // "do the default" as if that option hadn't been available.
+  LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
+  memcpy(&options, PassedOptions, SizeOfPassedOptions);
+
+  TargetOptions targetOptions;
+  targetOptions.NoFramePointerElim = options.NoFramePointerElim;
+  targetOptions.EnableFastISel = options.EnableFastISel;
+
+  std::string Error;
+  EngineBuilder builder(unwrap(M));
+  builder.setEngineKind(EngineKind::JIT)
+         .setErrorStr(&Error)
+         .setUseMCJIT(true)
+         .setOptLevel((CodeGenOpt::Level)options.OptLevel)
+         .setCodeModel(unwrap(options.CodeModel))
+         .setTargetOptions(targetOptions);
+  if (ExecutionEngine *JIT = builder.create()) {
+    *OutJIT = wrap(JIT);
+    return 0;
+  }
+  *OutError = strdup(Error.c_str());
+  return 1;
+}
+
 LLVMBool LLVMCreateExecutionEngine(LLVMExecutionEngineRef *OutEE,
                                    LLVMModuleProviderRef MP,
                                    char **OutError) {
@@ -176,6 +251,8 @@ void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) {
 int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
                           unsigned ArgC, const char * const *ArgV,
                           const char * const *EnvP) {
+  unwrap(EE)->finalizeObject();
+
   std::vector<std::string> ArgVec;
   for (unsigned I = 0; I != ArgC; ++I)
     ArgVec.push_back(ArgV[I]);
@@ -186,6 +263,8 @@ int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
 LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
                                     unsigned NumArgs,
                                     LLVMGenericValueRef *Args) {
+  unwrap(EE)->finalizeObject();
+
   std::vector<GenericValue> ArgVec;
   ArgVec.reserve(NumArgs);
   for (unsigned I = 0; I != NumArgs; ++I)
@@ -234,7 +313,8 @@ LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
   return 1;
 }
 
-void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn) {
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
+                                     LLVMValueRef Fn) {
   return unwrap(EE)->recompileAndRelinkFunction(unwrap<Function>(Fn));
 }
 
@@ -248,5 +328,7 @@ void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
 }
 
 void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
+  unwrap(EE)->finalizeObject();
+
   return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
 }
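LLVMCreateMCJITCompilerForModule() above deliberately takes the caller's sizeof of the options struct so that clients compiled against older or newer headers can be detected. A minimal caller sketch, with error handling abbreviated (createEngine and Mod are illustrative names, not part of the bindings):

    #include "llvm-c/ExecutionEngine.h"
    #include <stdio.h>
    #include <stdlib.h>

    LLVMExecutionEngineRef createEngine(LLVMModuleRef Mod) {
      LLVMMCJITCompilerOptions Opts;
      LLVMInitializeMCJITCompilerOptions(&Opts, sizeof(Opts));
      Opts.OptLevel = 2;  // fields left alone keep their defaults

      LLVMExecutionEngineRef EE;
      char *Err = NULL;
      // Passing sizeof(Opts) is what lets the library spot version skew.
      if (LLVMCreateMCJITCompilerForModule(&EE, Mod, &Opts, sizeof(Opts),
                                           &Err)) {
        fprintf(stderr, "MCJIT creation failed: %s\n", Err);
        free(Err);  // error strings come from strdup()
        return NULL;
      }
      return EE;
    }

Note how the finalizeObject() calls added to LLVMRunFunction() and friends mean the engine is usable straight after creation.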
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 526c04e..b95a9e8 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -114,6 +114,15 @@ static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
     Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
     break;
 
+#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                        \
+  case Type::VectorTyID: {                                           \
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());    \
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );            \
+    for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)             \
+      Dest.AggregateVal[_i].IntVal = APInt(1,                        \
+      Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
+  } break;
+
 // Handle pointers specially because they must be compared with only as much
 // width as the host has.  We _do not_ want to be comparing 64 bit values when
 // running on a 32-bit target, otherwise the upper 32 bits might mess up
@@ -129,6 +138,7 @@ static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(eq,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
   default:
     dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
@@ -142,6 +152,7 @@ static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(ne,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
     IMPLEMENT_POINTER_ICMP(!=);
   default:
     dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
@@ -155,6 +166,7 @@ static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(ult,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
     IMPLEMENT_POINTER_ICMP(<);
   default:
     dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
@@ -168,6 +180,7 @@ static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(slt,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
     IMPLEMENT_POINTER_ICMP(<);
   default:
     dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
@@ -181,6 +194,7 @@ static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(ugt,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
     IMPLEMENT_POINTER_ICMP(>);
   default:
     dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
@@ -194,6 +208,7 @@ static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(sgt,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
     IMPLEMENT_POINTER_ICMP(>);
   default:
     dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
@@ -207,6 +222,7 @@ static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(ule,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
     IMPLEMENT_POINTER_ICMP(<=);
   default:
     dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
@@ -220,6 +236,7 @@ static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(sle,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
     IMPLEMENT_POINTER_ICMP(<=);
   default:
     dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
@@ -233,6 +250,7 @@ static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(uge,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
     IMPLEMENT_POINTER_ICMP(>=);
   default:
     dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
@@ -246,6 +264,7 @@ static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_INTEGER_ICMP(sge,Ty);
+    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
     IMPLEMENT_POINTER_ICMP(>=);
   default:
     dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
@@ -285,12 +304,29 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break
 
+#define IMPLEMENT_VECTOR_FCMP_T(OP, TY)                              \
+  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());      \
+  Dest.AggregateVal.resize( Src1.AggregateVal.size() );              \
+  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)               \
+    Dest.AggregateVal[_i].IntVal = APInt(1,                          \
+    Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val); \
+  break;
+
+#define IMPLEMENT_VECTOR_FCMP(OP)                                    \
+  case Type::VectorTyID:                                             \
+    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {    \
+      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                            \
+    } else {                                                         \
+      IMPLEMENT_VECTOR_FCMP_T(OP, Double);                           \
+    }
+
 static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_FCMP(==, Float);
     IMPLEMENT_FCMP(==, Double);
+    IMPLEMENT_VECTOR_FCMP(==);
   default:
     dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
     llvm_unreachable(0);
@@ -298,17 +334,65 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
   return Dest;
 }
 
+#define IMPLEMENT_SCALAR_NANS(TY, X,Y)                               \
+  if (TY->isFloatTy()) {                                             \
+    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {      \
+      Dest.IntVal = APInt(1,false);                                  \
+      return Dest;                                                   \
+    }                                                                \
+  } else {                                                           \
+    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) {  \
+      Dest.IntVal = APInt(1,false);                                  \
+      return Dest;                                                   \
+    }                                                                \
+  }
+
+#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG)                            \
+  assert(X.AggregateVal.size() == Y.AggregateVal.size());            \
+  Dest.AggregateVal.resize( X.AggregateVal.size() );                 \
+  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) {                \
+    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val ||  \
+        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val)    \
+      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG);                  \
+    else {                                                           \
+      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG);                 \
+    }                                                                \
+  }
+
+#define MASK_VECTOR_NANS(TY, X,Y, FLAG)                              \
+  if (TY->isVectorTy()) {                                            \
+    if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) {   \
+      MASK_VECTOR_NANS_T(X, Y, Float, FLAG)                          \
+    } else {                                                         \
+      MASK_VECTOR_NANS_T(X, Y, Double, FLAG)                         \
+    }                                                                \
+  }                                                                  \
+
+
+
 static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
-                                    Type *Ty) {
+                                    Type *Ty)
+{
   GenericValue Dest;
+  // if input is scalar value and Src1 or Src2 is NaN return false
+  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
+  // if vector input detect NaNs and fill mask
+  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
+  GenericValue DestMask = Dest;
   switch (Ty->getTypeID()) {
     IMPLEMENT_FCMP(!=, Float);
     IMPLEMENT_FCMP(!=, Double);
-
-  default:
-    dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
-    llvm_unreachable(0);
+    IMPLEMENT_VECTOR_FCMP(!=);
+    default:
+      dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
+      llvm_unreachable(0);
   }
+  // in vector case mask out NaN elements
+  if (Ty->isVectorTy())
+    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+      if (DestMask.AggregateVal[_i].IntVal == false)
+        Dest.AggregateVal[_i].IntVal = APInt(1,false);
+
   return Dest;
 }
 
@@ -318,6 +402,7 @@ static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
   switch (Ty->getTypeID()) {
     IMPLEMENT_FCMP(<=, Float);
     IMPLEMENT_FCMP(<=, Double);
+    IMPLEMENT_VECTOR_FCMP(<=);
   default:
     dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
     llvm_unreachable(0);
@@ -331,6 +416,7 @@ static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
   switch (Ty->getTypeID()) {
     IMPLEMENT_FCMP(>=, Float);
     IMPLEMENT_FCMP(>=, Double);
+    IMPLEMENT_VECTOR_FCMP(>=);
   default:
     dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
     llvm_unreachable(0);
@@ -344,6 +430,7 @@ static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
   switch (Ty->getTypeID()) {
     IMPLEMENT_FCMP(<, Float);
     IMPLEMENT_FCMP(<, Double);
+    IMPLEMENT_VECTOR_FCMP(<);
   default:
     dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
     llvm_unreachable(0);
@@ -357,6 +444,7 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
   switch (Ty->getTypeID()) {
     IMPLEMENT_FCMP(>, Float);
     IMPLEMENT_FCMP(>, Double);
+    IMPLEMENT_VECTOR_FCMP(>);
   default:
     dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
     llvm_unreachable(0);
@@ -375,18 +463,32 @@ static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
     return Dest;                                                     \
   }
 
+#define IMPLEMENT_VECTOR_UNORDERED(TY, X,Y, _FUNC)                   \
+  if (TY->isVectorTy()) {                                            \
+    GenericValue DestMask = Dest;                                    \
+    Dest = _FUNC(Src1, Src2, Ty);                                    \
+    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)             \
+      if (DestMask.AggregateVal[_i].IntVal == true)                  \
+        Dest.AggregateVal[_i].IntVal = APInt(1,true);                \
+    return Dest;                                                     \
+  }
 
 static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
   return executeFCMP_OEQ(Src1, Src2, Ty);
+
 }
 
 static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
   return executeFCMP_ONE(Src1, Src2, Ty);
 }
 
@@ -394,6 +496,8 @@ static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
   return executeFCMP_OLE(Src1, Src2, Ty);
 }
 
@@ -401,6 +505,8 @@ static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
   return executeFCMP_OGE(Src1, Src2, Ty);
 }
 
@@ -408,6 +514,8 @@ static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
   return executeFCMP_OLT(Src1, Src2, Ty);
 }
 
@@ -415,33 +523,88 @@ static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
   IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
   return executeFCMP_OGT(Src1, Src2, Ty);
 }
 
 static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
-  if (Ty->isFloatTy())
+  if(Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].FloatVal ==
+        Src1.AggregateVal[_i].FloatVal) &&
+        (Src2.AggregateVal[_i].FloatVal ==
+        Src2.AggregateVal[_i].FloatVal)));
+    } else {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].DoubleVal ==
+        Src1.AggregateVal[_i].DoubleVal) &&
+        (Src2.AggregateVal[_i].DoubleVal ==
+        Src2.AggregateVal[_i].DoubleVal)));
+    }
+  } else if (Ty->isFloatTy())
     Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
                            Src2.FloatVal == Src2.FloatVal));
-  else
+  else {
     Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
                            Src2.DoubleVal == Src2.DoubleVal));
+  }
   return Dest;
 }
 
 static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
                                     Type *Ty) {
   GenericValue Dest;
-  if (Ty->isFloatTy())
+  if(Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].FloatVal !=
+        Src1.AggregateVal[_i].FloatVal) ||
+        (Src2.AggregateVal[_i].FloatVal !=
+        Src2.AggregateVal[_i].FloatVal)));
+    } else {
+      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+        Dest.AggregateVal[_i].IntVal = APInt(1,
+        ( (Src1.AggregateVal[_i].DoubleVal !=
+        Src1.AggregateVal[_i].DoubleVal) ||
+        (Src2.AggregateVal[_i].DoubleVal !=
+        Src2.AggregateVal[_i].DoubleVal)));
+    }
+  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
                           Src2.FloatVal != Src2.FloatVal));
-  else
+  else {
     Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
                            Src2.DoubleVal != Src2.DoubleVal));
+  }
   return Dest;
 }
 
+static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
+                                     const Type *Ty, const bool val) {
+  GenericValue Dest;
+  if(Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+      Dest.AggregateVal[_i].IntVal = APInt(1,val);
+  } else {
+    Dest.IntVal = APInt(1, val);
+  }
+
+  return Dest;
+}
+
 void Interpreter::visitFCmpInst(FCmpInst &I) {
   ExecutionContext &SF = ECStack.back();
   Type *Ty    = I.getOperand(0)->getType();
@@ -450,8 +613,14 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
   GenericValue R;   // Result
 
   switch (I.getPredicate()) {
-  case FCmpInst::FCMP_FALSE: R.IntVal = APInt(1,false); break;
-  case FCmpInst::FCMP_TRUE:  R.IntVal = APInt(1,true); break;
+  default:
+    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
+    llvm_unreachable(0);
+  break;
+  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
+  break;
+  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
+  break;
   case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
   case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
   case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
@@ -466,9 +635,6 @@ void Interpreter::visitFCmpInst(FCmpInst &I) {
   case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
   case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
   case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
-  default:
-    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
-    llvm_unreachable(0);
   }
 
   SetValue(&I, R, SF);
@@ -502,16 +668,8 @@ static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
   case FCmpInst::FCMP_ULE:   return executeFCMP_ULE(Src1, Src2, Ty);
   case FCmpInst::FCMP_OGE:   return executeFCMP_OGE(Src1, Src2, Ty);
   case FCmpInst::FCMP_UGE:   return executeFCMP_UGE(Src1, Src2, Ty);
-  case FCmpInst::FCMP_FALSE: {
-    GenericValue Result;
-    Result.IntVal = APInt(1, false);
-    return Result;
-  }
-  case FCmpInst::FCMP_TRUE: {
-    GenericValue Result;
-    Result.IntVal = APInt(1, true);
-    return Result;
-  }
+  case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
+  case FCmpInst::FCMP_TRUE:  return executeFCMP_BOOL(Src1, Src2, Ty, true);
   default:
     dbgs() << "Unhandled Cmp predicate\n";
     llvm_unreachable(0);
@@ -525,27 +683,105 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
   GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
   GenericValue R;   // Result
 
-  switch (I.getOpcode()) {
-  case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
-  case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
-  case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
-  case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
-  case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
-  case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
-  case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
-  case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
-  case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
-  case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
-  case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
-  case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
-  case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
-  case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
-  case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
-  default:
-    dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
-    llvm_unreachable(0);
+  // First process vector operation
+  if (Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    R.AggregateVal.resize(Src1.AggregateVal.size());
+
+    // Macros to execute binary operation 'OP' over integer vectors
+#define INTEGER_VECTOR_OPERATION(OP)                               \
+    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
+      R.AggregateVal[i].IntVal =                                   \
+      Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
+
+    // Additional macros to execute binary operations udiv/sdiv/urem/srem
+    // since they have different notation.
+#define INTEGER_VECTOR_FUNCTION(OP)                                \
+    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
+      R.AggregateVal[i].IntVal =                                   \
+      Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
+
+    // Macros to execute binary operation 'OP' over floating point type TY
+    // (float or double) vectors
+#define FLOAT_VECTOR_FUNCTION(OP, TY)                              \
+      for (unsigned i = 0; i < R.AggregateVal.size(); ++i)         \
+        R.AggregateVal[i].TY =                                     \
+        Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
+
+    // Macros to choose appropriate TY: float or double and run operation
+    // execution
+#define FLOAT_VECTOR_OP(OP) {                                      \
+  if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())     \
+    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                            \
+  else {                                                           \
+    if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())  \
+      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                         \
+    else {                                                         \
+      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
+      llvm_unreachable(0);                                         \
+    }                                                              \
+  }                                                                \
+}
+
+    switch(I.getOpcode()){
+    default:
+      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+      llvm_unreachable(0);
+      break;
+    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
+    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
+    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
+    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
+    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
+    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
+    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
+    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
+    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
+    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
+    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
+    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
+    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
+    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
+    case Instruction::FRem:
+      if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())
+        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+          R.AggregateVal[i].FloatVal =
+          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
+      else {
+        if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+            R.AggregateVal[i].DoubleVal =
+            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
+        else {
+          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+          llvm_unreachable(0);
+        }
+      }
+      break;
+    }
+  } else {
+    switch (I.getOpcode()) {
+    default:
+      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+      llvm_unreachable(0);
+      break;
+    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
+    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
+    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
+    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
+    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
+    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
+    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
+    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
+    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
+    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
+    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
+    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
+    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
+    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
+    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
+    }
   }
-
   SetValue(&I, R, SF);
 }
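All of the vector FCmp changes above follow one pattern: compute the ordered lane-wise result, then use the NaN mask built by MASK_VECTOR_NANS to force lanes containing a NaN to false for ordered predicates and to true for unordered ones (IMPLEMENT_VECTOR_UNORDERED). A tiny standalone program illustrating that lane semantics, using the same x != x NaN test as the interpreter (an editorial example, not interpreter code):

    #include <cstdio>
    #include <limits>

    int main() {
      const float NaN = std::numeric_limits<float>::quiet_NaN();
      float A[2] = {1.0f, NaN};
      float B[2] = {2.0f, 2.0f};
      for (int i = 0; i != 2; ++i) {
        bool Unordered = (A[i] != A[i]) || (B[i] != B[i]); // NaN-mask lane
        bool ONE = !Unordered && (A[i] != B[i]); // ordered and not-equal
        bool UNE = Unordered || (A[i] != B[i]);  // unordered or not-equal
        std::printf("lane %d: ONE=%d UNE=%d\n", i, ONE, UNE);
      }
      return 0; // prints ONE=1 UNE=1 for lane 0, ONE=0 UNE=1 for lane 1
    }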
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index fee10e1..38aa547 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -14,6 +14,7 @@
 #include "llvm/ExecutionEngine/MCJIT.h"
 #include "llvm/ExecutionEngine/ObjectBuffer.h"
 #include "llvm/ExecutionEngine/ObjectImage.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
@@ -46,13 +47,14 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
   // FIXME: Don't do this here.
   sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
 
-  return new MCJIT(M, TM, JMM, GVsWithCode);
+  return new MCJIT(M, TM, JMM ? JMM : new SectionMemoryManager(), GVsWithCode);
 }
 
 MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM,
              bool AllocateGVsWithCode)
-  : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM),
-    isCompiled(false), M(m)  {
+  : ExecutionEngine(m), TM(tm), Ctx(0),
+    MemMgr(MM ? MM : new SectionMemoryManager()), Dyld(MemMgr),
+    IsLoaded(false), M(m), ObjCache(0)  {
 
   setDataLayout(TM->getDataLayout());
 }
@@ -64,7 +66,11 @@ MCJIT::~MCJIT() {
   delete TM;
 }
 
-void MCJIT::emitObject(Module *m) {
+void MCJIT::setObjectCache(ObjectCache* NewCache) {
+  ObjCache = NewCache;
+}
+
+ObjectBufferStream* MCJIT::emitObject(Module *m) {
   /// Currently, MCJIT only supports a single module and the module passed to
   /// this function call is expected to be the contained module.  The module
   /// is passed as a parameter here to prepare for multiple module support in
@@ -77,30 +83,66 @@ void MCJIT::emitObject(Module *m) {
   // FIXME: Track compilation state on a per-module basis when multiple modules
   //        are supported.
   // Re-compilation is not supported
-  if (isCompiled)
-    return;
+  assert(!IsLoaded);
 
   PassManager PM;
 
   PM.add(new DataLayout(*TM->getDataLayout()));
 
   // The RuntimeDyld will take ownership of this shortly
-  OwningPtr<ObjectBufferStream> Buffer(new ObjectBufferStream());
+  OwningPtr<ObjectBufferStream> CompiledObject(new ObjectBufferStream());
 
   // Turn the machine code intermediate representation into bytes in memory
   // that may be executed.
-  if (TM->addPassesToEmitMC(PM, Ctx, Buffer->getOStream(), false)) {
+  if (TM->addPassesToEmitMC(PM, Ctx, CompiledObject->getOStream(), false)) {
     report_fatal_error("Target does not support MC emission!");
   }
 
   // Initialize passes.
   PM.run(*m);
   // Flush the output buffer to get the generated code into memory
-  Buffer->flush();
+  CompiledObject->flush();
+
+  // If we have an object cache, tell it about the new object.
+  // Note that we're using the compiled image, not the loaded image (as below).
+  if (ObjCache) {
+    // MemoryBuffer is a thin wrapper around the actual memory, so it's OK
+    // to create a temporary object here and delete it after the call.
+    OwningPtr<MemoryBuffer> MB(CompiledObject->getMemBuffer());
+    ObjCache->notifyObjectCompiled(m, MB.get());
+  }
+
+  return CompiledObject.take();
+}
+
+void MCJIT::loadObject(Module *M) {
+
+  // Get a thread lock to make sure we aren't trying to load multiple times
+  MutexGuard locked(lock);
+
+  // FIXME: Track compilation state on a per-module basis when multiple modules
+  //        are supported.
+  // Re-compilation is not supported
+  if (IsLoaded)
+    return;
+
+  OwningPtr<ObjectBuffer> ObjectToLoad;
+  // Try to load the pre-compiled object from cache if possible
+  if (0 != ObjCache) {
+    OwningPtr<MemoryBuffer> PreCompiledObject(ObjCache->getObjectCopy(M));
+    if (0 != PreCompiledObject.get())
+      ObjectToLoad.reset(new ObjectBuffer(PreCompiledObject.take()));
+  }
+
+  // If the cache did not contain a suitable object, compile the object
+  if (!ObjectToLoad) {
+    ObjectToLoad.reset(emitObject(M));
+    assert(ObjectToLoad.get() && "Compilation did not produce an object.");
+  }
 
   // Load the object into the dynamic linker.
   // handing off ownership of the buffer
-  LoadedObject.reset(Dyld.loadObject(Buffer.take()));
+  LoadedObject.reset(Dyld.loadObject(ObjectToLoad.take()));
   if (!LoadedObject)
     report_fatal_error(Dyld.getErrorString());
 
@@ -113,7 +155,7 @@ void MCJIT::emitObject(Module *m) {
   NotifyObjectEmitted(*LoadedObject);
 
   // FIXME: Add support for per-module compilation state
-  isCompiled = true;
+  IsLoaded = true;
 }
 
 // FIXME: Add a parameter to identify which object is being finalized when
@@ -122,19 +164,18 @@ void MCJIT::emitObject(Module *m) {
 // protection in the interface.
 void MCJIT::finalizeObject() {
   // If the module hasn't been compiled, just do that.
-  if (!isCompiled) {
-    // If the call to Dyld.resolveRelocations() is removed from emitObject()
+  if (!IsLoaded) {
+    // If the call to Dyld.resolveRelocations() is removed from loadObject()
     //   we'll need to do that here.
-    emitObject(M);
-
-    // Set page permissions.
-    MemMgr->applyPermissions();
-
-    return;
+    loadObject(M);
+  } else {
+    // Resolve any relocations.
+    Dyld.resolveRelocations();
   }
 
-  // Resolve any relocations.
-  Dyld.resolveRelocations();
+  StringRef EHData = Dyld.getEHFrameSection();
+  if (!EHData.empty())
+    MemMgr->registerEHFrames(EHData);
 
   // Set page permissions.
   MemMgr->applyPermissions();
@@ -151,8 +192,8 @@ void *MCJIT::getPointerToFunction(Function *F) {
   //        dies.
 
   // FIXME: Add support for per-module compilation state
-  if (!isCompiled)
-    emitObject(M);
+  if (!IsLoaded)
+    loadObject(M);
 
   if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
     bool AbortOnFailure = !F->hasExternalWeakLinkage();
@@ -284,8 +325,8 @@ GenericValue MCJIT::runFunction(Function *F,
 void *MCJIT::getPointerToNamedFunction(const std::string &Name,
                                        bool AbortOnFailure) {
   // FIXME: Add support for per-module compilation state
-  if (!isCompiled)
-    emitObject(M);
+  if (!IsLoaded)
+    loadObject(M);
 
   if (!isSymbolSearchingDisabled() && MemMgr) {
     void *ptr = MemMgr->getPointerToNamedFunction(Name, false);
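loadObject() above consults an optional ObjectCache before compiling, and emitObject() reports freshly compiled objects back to it. A sketch of a minimal in-memory cache, assuming the interface as this snapshot appears to use it (notifyObjectCompiled() plus a getObject() lookup backing the getObjectCopy() call); the class name and storage are illustrative only:

    #include "llvm/ExecutionEngine/ObjectCache.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <map>
    #include <string>
    using namespace llvm;

    class InMemoryObjectCache : public ObjectCache {
      std::map<std::string, MemoryBuffer*> Objects; // module id -> object
    public:
      virtual ~InMemoryObjectCache() {
        for (std::map<std::string, MemoryBuffer*>::iterator
               I = Objects.begin(), E = Objects.end(); I != E; ++I)
          delete I->second;
      }
      virtual void notifyObjectCompiled(const Module *M,
                                        const MemoryBuffer *Obj) {
        MemoryBuffer *&Slot = Objects[M->getModuleIdentifier()];
        delete Slot; // replace any stale entry
        Slot = MemoryBuffer::getMemBufferCopy(Obj->getBuffer(),
                                              Obj->getBufferIdentifier());
      }
      virtual const MemoryBuffer *getObject(const Module *M) {
        std::map<std::string, MemoryBuffer*>::iterator I =
            Objects.find(M->getModuleIdentifier());
        return I == Objects.end() ? 0 : I->second;
      }
    };

A real cache would key on something content-based rather than the module identifier and would persist to disk; hooking one up is a single setObjectCache(&Cache) call on the engine before the first run.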
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
index 283a8e5..8c4bf6e 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -12,6 +12,7 @@
 
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
 #include "llvm/ExecutionEngine/RuntimeDyld.h"
 #include "llvm/PassManager.h"
 
@@ -34,16 +35,23 @@ class MCJIT : public ExecutionEngine {
   SmallVector<JITEventListener*, 2> EventListeners;
 
   // FIXME: Add support for multiple modules
-  bool isCompiled;
+  bool IsLoaded;
   Module *M;
   OwningPtr<ObjectImage>  LoadedObject;
 
+  // An optional ObjectCache to be notified of compiled objects and used to
+  // perform lookup of pre-compiled code to avoid re-compilation.
+  ObjectCache *ObjCache;
+
 public:
   ~MCJIT();
 
   /// @name ExecutionEngine interface implementation
   /// @{
 
+  /// Sets the object manager that MCJIT should use to avoid compilation.
+  virtual void setObjectCache(ObjectCache *manager);
+
   virtual void finalizeObject();
 
   virtual void *getPointerToBasicBlock(BasicBlock *BB);
@@ -102,7 +110,9 @@ protected:
   /// this function call is expected to be the contained module.  The module
   /// is passed as a parameter here to prepare for multiple module support in
   /// the future.
-  void emitObject(Module *M);
+  ObjectBufferStream* emitObject(Module *M);
+
+  void loadObject(Module *M);
 
   void NotifyObjectEmitted(const ObjectImage& Obj);
   void NotifyFreeingObject(const ObjectImage& Obj);
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
index fa35acd..bac77ce 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
@@ -138,9 +138,46 @@ bool SectionMemoryManager::applyPermissions(std::string *ErrMsg)
 
   // Read-write data memory already has the correct permissions
 
+  // Some platforms with separate data cache and instruction cache require
+  // explicit cache flush, otherwise JIT code manipulations (like resolved
+  // relocations) will get to the data cache but not to the instruction cache.
+  invalidateInstructionCache();
+
   return false;
 }
 
+// Determine whether we can register EH tables.
+#if (defined(__GNUC__) && !defined(__ARM_EABI__) && \
+     !defined(__USING_SJLJ_EXCEPTIONS__))
+#define HAVE_EHTABLE_SUPPORT 1
+#else
+#define HAVE_EHTABLE_SUPPORT 0
+#endif
+
+#if HAVE_EHTABLE_SUPPORT
+extern "C" void __register_frame(void*);
+
+static const char *processFDE(const char *Entry) {
+  const char *P = Entry;
+  uint32_t Length = *((uint32_t*)P);
+  P += 4;
+  uint32_t Offset = *((uint32_t*)P);
+  if (Offset != 0)
+    __register_frame((void*)Entry);
+  return P + Length;
+}
+#endif
+
+void SectionMemoryManager::registerEHFrames(StringRef SectionData) {
+#if HAVE_EHTABLE_SUPPORT
+  const char *P = SectionData.data();
+  const char *End = SectionData.data() + SectionData.size();
+  do  {
+    P = processFDE(P);
+  } while(P != End);
+#endif
+}
+
 error_code SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                                              unsigned Permissions) {
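registerEHFrames() above walks the .eh_frame section entry by entry and registers each FDE with the C++ runtime via __register_frame() (a libgcc/libunwind entry point, hence the HAVE_EHTABLE_SUPPORT guard). The record layout that processFDE()'s pointer arithmetic relies on, as an editorial sketch:

    // One .eh_frame entry, as processFDE() reads it:
    //
    //   +0: uint32_t Length;  // size of the rest of this entry
    //   +4: uint32_t Offset;  // 0 => a CIE (skipped); nonzero (a CIE
    //                         //   back-reference) => an FDE, which is
    //                         //   handed to __register_frame()
    //   +8: ...               // body: the remaining Length - 4 bytes
    //
    // With P already advanced past Length, "return P + Length" lands on
    // the next entry, so registerEHFrames() can loop until End.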
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 409b25f..a08b508 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -25,10 +25,15 @@ using namespace llvm::object;
 
 // Empty out-of-line virtual destructor as the key function.
 RTDyldMemoryManager::~RTDyldMemoryManager() {}
+void RTDyldMemoryManager::registerEHFrames(StringRef SectionData) {}
 RuntimeDyldImpl::~RuntimeDyldImpl() {}
 
 namespace llvm {
 
+StringRef RuntimeDyldImpl::getEHFrameSection() {
+  return StringRef();
+}
+
 // Resolve the relocations for all symbols we currently know about.
 void RuntimeDyldImpl::resolveRelocations() {
   // First, resolve relocations associated with external symbols.
@@ -96,7 +101,8 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
     bool isCommon = flags & SymbolRef::SF_Common;
     if (isCommon) {
       // Add the common symbols to a list.  We'll allocate them all below.
-      uint64_t Align = getCommonSymbolAlignment(*i);
+      uint32_t Align;
+      Check(i->getAlignment(Align));
       uint64_t Size = 0;
       Check(i->getSize(Size));
       CommonSize += Size + Align;
@@ -154,18 +160,8 @@ ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) {
         isFirstRelocation = false;
       }
 
-      ObjRelocationInfo RI;
-      RI.SectionID = SectionID;
-      Check(i->getAdditionalInfo(RI.AdditionalInfo));
-      Check(i->getOffset(RI.Offset));
-      Check(i->getSymbol(RI.Symbol));
-      Check(i->getType(RI.Type));
-
-      DEBUG(dbgs() << "\t\tAddend: " << RI.AdditionalInfo
-                   << " Offset: " << format("%p", (uintptr_t)RI.Offset)
-                   << " Type: " << (uint32_t)(RI.Type & 0xffffffffL)
-                   << "\n");
-      processRelocationRef(RI, *obj, LocalSections, LocalSymbols, Stubs);
+      processRelocationRef(SectionID, *i, *obj, LocalSections, LocalSymbols,
+                           Stubs);
     }
   }
 
@@ -183,7 +179,7 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
   if (!Addr)
     report_fatal_error("Unable to allocate memory for common symbols!");
   uint64_t Offset = 0;
-  Sections.push_back(SectionEntry(StringRef(), Addr, TotalSize, TotalSize, 0));
+  Sections.push_back(SectionEntry(StringRef(), Addr, TotalSize, 0));
   memset(Addr, 0, TotalSize);
 
   DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
@@ -243,6 +239,12 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
   Check(Section.isReadOnlyData(IsReadOnly));
   Check(Section.getSize(DataSize));
   Check(Section.getName(Name));
+  if (StubSize > 0) {
+    unsigned StubAlignment = getStubAlignment();
+    unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
+    if (StubAlignment > EndAlignment)
+      StubBufSize += StubAlignment - EndAlignment;
+  }
 
   unsigned Allocate;
   unsigned SectionID = Sections.size();
@@ -295,8 +297,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
                  << "\n");
   }
 
-  Sections.push_back(SectionEntry(Name, Addr, Allocate, DataSize,
-                                  (uintptr_t)pData));
+  Sections.push_back(SectionEntry(Name, Addr, DataSize, (uintptr_t)pData));
   return SectionID;
 }
 
@@ -339,7 +340,25 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
 }
 
 uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
-  if (Arch == Triple::arm) {
+  if (Arch == Triple::aarch64) {
+    // This stub has to be able to access the full address space,
+    // since symbol lookup won't necessarily find a handy, in-range,
+    // PLT stub for functions which could be anywhere.
+    uint32_t *StubAddr = (uint32_t*)Addr;
+
+    // Stub can use ip0 (== x16) to calculate address
+    *StubAddr = 0xd2e00010; // movz ip0, #:abs_g3:<addr>
+    StubAddr++;
+    *StubAddr = 0xf2c00010; // movk ip0, #:abs_g2_nc:<addr>
+    StubAddr++;
+    *StubAddr = 0xf2a00010; // movk ip0, #:abs_g1_nc:<addr>
+    StubAddr++;
+    *StubAddr = 0xf2800010; // movk ip0, #:abs_g0_nc:<addr>
+    StubAddr++;
+    *StubAddr = 0xd61f0200; // br ip0
+
+    return Addr;
+  } else if (Arch == Triple::arm) {
     // TODO: There is only ARM far stub now. We should add the Thumb stub,
     // and stubs for branches Thumb - ARM and ARM - Thumb.
     uint32_t *StubAddr = (uint32_t*)Addr;
@@ -380,6 +399,13 @@ uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
     writeInt32BE(Addr+40, 0x4E800420); // bctr
 
     return Addr;
+  } else if (Arch == Triple::systemz) {
+    writeInt16BE(Addr,    0xC418);     // lgrl %r1,.+8
+    writeInt16BE(Addr+2,  0x0000);
+    writeInt16BE(Addr+4,  0x0004);
+    writeInt16BE(Addr+6,  0x07F1);     // brc 15,%r1
+    // 8-byte address stored at Addr + 8
+    return Addr;
   }
   return Addr;
 }
@@ -401,26 +427,14 @@ void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
   Sections[SectionID].LoadAddress = Addr;
 }
 
-void RuntimeDyldImpl::resolveRelocationEntry(const RelocationEntry &RE,
-                                             uint64_t Value) {
-  // Ignore relocations for sections that were not loaded
-  if (Sections[RE.SectionID].Address != 0) {
-    DEBUG(dbgs() << "\tSectionID: " << RE.SectionID
-          << " + " << RE.Offset << " ("
-          << format("%p", Sections[RE.SectionID].Address + RE.Offset) << ")"
-          << " RelType: " << RE.RelType
-          << " Addend: " << RE.Addend
-          << "\n");
-
-    resolveRelocation(Sections[RE.SectionID], RE.Offset,
-                      Value, RE.RelType, RE.Addend);
-  }
-}
-
 void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
                                             uint64_t Value) {
   for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
-    resolveRelocationEntry(Relocs[i], Value);
+    const RelocationEntry &RE = Relocs[i];
+    // Ignore relocations for sections that were not loaded
+    if (Sections[RE.SectionID].Address == 0)
+      continue;
+    resolveRelocation(RE, Value);
   }
 }
 
@@ -534,4 +548,8 @@ StringRef RuntimeDyld::getErrorString() {
   return Dyld->getErrorString();
 }
 
+StringRef RuntimeDyld::getEHFrameSection() {
+  return Dyld->getEHFrameSection();
+}
+
 } // end namespace llvm
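createStubFunction() above grows two new far-branch stubs. The AArch64 one materializes a full 64-bit target address into ip0 (x16) with a movz/movk chain and branches through it; the immediates are emitted as zero and patched later by the R_AARCH64_MOVW_UABS_G* relocations that RuntimeDyldELF attaches to the stub (see the next file). An annotated decode of the five constants (editorial, matching the in-line comments):

    // 0xd2e00010   movz x16, #0, lsl #48  -- bits [63:48] of the target
    // 0xf2c00010   movk x16, #0, lsl #32  -- bits [47:32], keeping the rest
    // 0xf2a00010   movk x16, #0, lsl #16  -- bits [31:16]
    // 0xf2800010   movk x16, #0, lsl #0   -- bits [15:0]
    // 0xd61f0200   br   x16               -- branch anywhere in 64-bit space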
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index b8537b1..d4d84d3 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -41,7 +41,7 @@ error_code check(error_code Err) {
 
 template<class ELFT>
 class DyldELFObject : public ELFObjectFile<ELFT> {
-  LLVM_ELF_IMPORT_TYPES(ELFT)
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
 
   typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
   typedef Elf_Sym_Impl<ELFT> Elf_Sym;
@@ -151,6 +151,14 @@ void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
 
 namespace llvm {
 
+StringRef RuntimeDyldELF::getEHFrameSection() {
+  for (int i = 0, e = Sections.size(); i != e; ++i) {
+    if (Sections[i].Name == ".eh_frame")
+      return StringRef((const char*)Sections[i].Address, Sections[i].Size);
+  }
+  return StringRef();
+}
+
 ObjectImage *RuntimeDyldELF::createObjectImage(ObjectBuffer *Buffer) {
   if (Buffer->getBufferSize() < ELF::EI_NIDENT)
     llvm_unreachable("Unexpected ELF object size");
@@ -269,6 +277,85 @@ void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
   }
 }
 
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+                                              uint64_t Offset,
+                                              uint64_t Value,
+                                              uint32_t Type,
+                                              int64_t Addend) {
+  uint32_t *TargetPtr = reinterpret_cast<uint32_t*>(Section.Address + Offset);
+  uint64_t FinalAddress = Section.LoadAddress + Offset;
+
+  DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+               << format("%llx", Section.Address + Offset)
+               << " FinalAddress: 0x" << format("%llx",FinalAddress)
+               << " Value: 0x" << format("%llx",Value)
+               << " Type: 0x" << format("%x",Type)
+               << " Addend: 0x" << format("%llx",Addend)
+               << "\n");
+
+  switch (Type) {
+  default:
+    llvm_unreachable("Relocation type not implemented yet!");
+    break;
+  case ELF::R_AARCH64_ABS64: {
+    uint64_t *TargetPtr = reinterpret_cast<uint64_t*>(Section.Address + Offset);
+    *TargetPtr = Value + Addend;
+    break;
+  }
+  case ELF::R_AARCH64_PREL32: { // test-shift.ll (.eh_frame)
+    uint64_t Result = Value + Addend - FinalAddress;
+    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+           static_cast<int64_t>(Result) <= UINT32_MAX);
+    *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
+    break;
+  }
+  case ELF::R_AARCH64_CALL26: // fallthrough
+  case ELF::R_AARCH64_JUMP26: {
+    // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
+    // calculation.
+    uint64_t BranchImm = Value + Addend - FinalAddress;
+
+    // "Check that -2^27 <= result < 2^27".
+    assert(-(1LL << 27) <= static_cast<int64_t>(BranchImm) &&
+           static_cast<int64_t>(BranchImm) < (1LL << 27));
+    // Immediate goes in bits 25:0 of B and BL.
+    *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G3: {
+    uint64_t Result = Value + Addend;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= Result >> (48 - 5);
+    // Shift is "lsl #48", in bits 22:21
+    *TargetPtr |= 3 << 21;
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
+    uint64_t Result = Value + Addend;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
+    // Shift is "lsl #32", in bits 22:21
+    *TargetPtr |= 2 << 21;
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
+    uint64_t Result = Value + Addend;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
+    // Shift is "lsl #16", in bits 22:21
+    *TargetPtr |= 1 << 21;
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
+    uint64_t Result = Value + Addend;
+    // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
+    *TargetPtr |= ((Result & 0xffffU) << 5);
+    // Shift is "lsl #0", in bits 22:21. No action needed.
+    break;
+  }
+  }
+}
+
 void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
                                           uint64_t Offset,
                                           uint32_t Value,
@@ -541,6 +628,11 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
       llvm_unreachable("Relocation R_PPC64_REL32 overflow");
     writeInt32BE(LocalAddress, delta);
   } break;
+  case ELF::R_PPC64_REL64: {
+    uint64_t FinalAddress = (Section.LoadAddress + Offset);
+    uint64_t Delta = Value - FinalAddress + Addend;
+    writeInt64BE(LocalAddress, Delta);
+  } break;
   case ELF::R_PPC64_ADDR64 :
     writeInt64BE(LocalAddress, Value + Addend);
     break;
@@ -560,6 +652,48 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
   }
 }
 
+void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
+                                              uint64_t Offset,
+                                              uint64_t Value,
+                                              uint32_t Type,
+                                              int64_t Addend) {
+  uint8_t *LocalAddress = Section.Address + Offset;
+  switch (Type) {
+  default:
+    llvm_unreachable("Relocation type not implemented yet!");
+    break;
+  case ELF::R_390_PC16DBL:
+  case ELF::R_390_PLT16DBL: {
+    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
+    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
+    writeInt16BE(LocalAddress, Delta / 2);
+    break;
+  }
+  case ELF::R_390_PC32DBL:
+  case ELF::R_390_PLT32DBL: {
+    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
+    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
+    writeInt32BE(LocalAddress, Delta / 2);
+    break;
+  }
+  case ELF::R_390_PC32: {
+    int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
+    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
+    writeInt32BE(LocalAddress, Delta);
+    break;
+  }
+  case ELF::R_390_64:
+    writeInt64BE(LocalAddress, Value + Addend);
+    break;
+  }
+}
+
+void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
+                                       uint64_t Value) {
+  const SectionEntry &Section = Sections[RE.SectionID];
+  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend);
+}
+
 void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                                        uint64_t Offset,
                                        uint64_t Value,
@@ -574,6 +708,9 @@ void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
                          (uint32_t)(Value & 0xffffffffL), Type,
                          (uint32_t)(Addend & 0xffffffffL));
     break;
+  case Triple::aarch64:
+    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
+    break;
   case Triple::arm:    // Fall through.
   case Triple::thumb:
     resolveARMRelocation(Section, Offset,
@@ -589,19 +726,25 @@ void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
   case Triple::ppc64:
     resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
     break;
+  case Triple::systemz:
+    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
+    break;
   default: llvm_unreachable("Unsupported CPU type!");
   }
 }
 
-void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
+void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
+                                          RelocationRef RelI,
                                           ObjectImage &Obj,
                                           ObjSectionToIDMap &ObjSectionToID,
                                           const SymbolTableMap &Symbols,
                                           StubMap &Stubs) {
-
-  uint32_t RelType = (uint32_t)(Rel.Type & 0xffffffffL);
-  intptr_t Addend = (intptr_t)Rel.AdditionalInfo;
-  const SymbolRef &Symbol = Rel.Symbol;
+  uint64_t RelType;
+  Check(RelI.getType(RelType));
+  int64_t Addend;
+  Check(RelI.getAdditionalInfo(Addend));
+  SymbolRef Symbol;
+  Check(RelI.getSymbol(Symbol));
 
   // Obtain the symbol name which is referenced in the relocation
   StringRef TargetName;
@@ -617,14 +760,14 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
   Symbol.getType(SymType);
   if (lsi != Symbols.end()) {
     Value.SectionID = lsi->second.first;
-    Value.Addend = lsi->second.second;
+    Value.Addend = lsi->second.second + Addend;
   } else {
     // Search for the symbol in the global symbol table
     SymbolTableMap::const_iterator gsi =
      GlobalSymbolTable.find(TargetName.data());
     if (gsi != GlobalSymbolTable.end()) {
       Value.SectionID = gsi->second.first;
-      Value.Addend = gsi->second.second;
+      Value.Addend = gsi->second.second + Addend;
     } else {
       switch (SymType) {
       case SymbolRef::ST_Debug: {
@@ -657,21 +800,73 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
      }
    }
  }
-  DEBUG(dbgs() << "\t\tRel.SectionID: " << Rel.SectionID
-               << " Rel.Offset: " << Rel.Offset
+  uint64_t Offset;
+  Check(RelI.getOffset(Offset));
+
+  DEBUG(dbgs() << "\t\tSectionID: " << SectionID
+               << " Offset: " << Offset
                << "\n");
-  if (Arch == Triple::arm &&
+  if (Arch == Triple::aarch64 &&
+      (RelType == ELF::R_AARCH64_CALL26 ||
+       RelType == ELF::R_AARCH64_JUMP26)) {
+    // This is an AArch64 branch relocation, need to use a stub function.
+    DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
+    SectionEntry &Section = Sections[SectionID];
+
+    // Look for an existing stub.
+    StubMap::const_iterator i = Stubs.find(Value);
+    if (i != Stubs.end()) {
+      resolveRelocation(Section, Offset,
+                        (uint64_t)Section.Address + i->second, RelType, 0);
+      DEBUG(dbgs() << " Stub function found\n");
+    } else {
+      // Create a new stub function.
+      DEBUG(dbgs() << " Create a new stub function\n");
+      Stubs[Value] = Section.StubOffset;
+      uint8_t *StubTargetAddr = createStubFunction(Section.Address +
+                                                   Section.StubOffset);
+
+      RelocationEntry REmovz_g3(SectionID,
+                                StubTargetAddr - Section.Address,
+                                ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
+      RelocationEntry REmovk_g2(SectionID,
+                                StubTargetAddr - Section.Address + 4,
+                                ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
+      RelocationEntry REmovk_g1(SectionID,
+                                StubTargetAddr - Section.Address + 8,
+                                ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
+      RelocationEntry REmovk_g0(SectionID,
+                                StubTargetAddr - Section.Address + 12,
+                                ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
+
+      if (Value.SymbolName) {
+        addRelocationForSymbol(REmovz_g3, Value.SymbolName);
+        addRelocationForSymbol(REmovk_g2, Value.SymbolName);
+        addRelocationForSymbol(REmovk_g1, Value.SymbolName);
+        addRelocationForSymbol(REmovk_g0, Value.SymbolName);
+      } else {
+        addRelocationForSection(REmovz_g3, Value.SectionID);
+        addRelocationForSection(REmovk_g2, Value.SectionID);
+        addRelocationForSection(REmovk_g1, Value.SectionID);
+        addRelocationForSection(REmovk_g0, Value.SectionID);
+      }
+      resolveRelocation(Section, Offset,
+                        (uint64_t)Section.Address + Section.StubOffset,
+                        RelType, 0);
+      Section.StubOffset += getMaxStubSize();
+    }
+  } else if (Arch == Triple::arm &&
       (RelType == ELF::R_ARM_PC24 ||
        RelType == ELF::R_ARM_CALL ||
        RelType == ELF::R_ARM_JUMP24)) {
     // This is an ARM branch relocation, need to use a stub function.
     DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
-    SectionEntry &Section = Sections[Rel.SectionID];
+    SectionEntry &Section = Sections[SectionID];
 
     // Look for an existing stub.
     StubMap::const_iterator i = Stubs.find(Value);
     if (i != Stubs.end()) {
-      resolveRelocation(Section, Rel.Offset,
+      resolveRelocation(Section, Offset,
                         (uint64_t)Section.Address + i->second, RelType, 0);
       DEBUG(dbgs() << " Stub function found\n");
     } else {
@@ -680,14 +875,14 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
       Stubs[Value] = Section.StubOffset;
       uint8_t *StubTargetAddr = createStubFunction(Section.Address +
                                                    Section.StubOffset);
-      RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+      RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                          ELF::R_ARM_ABS32, Value.Addend);
       if (Value.SymbolName)
         addRelocationForSymbol(RE, Value.SymbolName);
       else
         addRelocationForSection(RE, Value.SectionID);
 
-      resolveRelocation(Section, Rel.Offset,
+      resolveRelocation(Section, Offset,
                         (uint64_t)Section.Address + Section.StubOffset,
                         RelType, 0);
       Section.StubOffset += getMaxStubSize();
@@ -696,8 +891,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
              RelType == ELF::R_MIPS_26) {
     // This is an Mips branch relocation, need to use a stub function.
     DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
-    SectionEntry &Section = Sections[Rel.SectionID];
-    uint8_t *Target = Section.Address + Rel.Offset;
+    SectionEntry &Section = Sections[SectionID];
+    uint8_t *Target = Section.Address + Offset;
     uint32_t *TargetAddress = (uint32_t *)Target;
 
     // Extract the addend from the instruction.
@@ -708,7 +903,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
     //  Look up for existing stub.
     StubMap::const_iterator i = Stubs.find(Value);
     if (i != Stubs.end()) {
-      resolveRelocation(Section, Rel.Offset,
+      resolveRelocation(Section, Offset,
                         (uint64_t)Section.Address + i->second, RelType, 0);
       DEBUG(dbgs() << " Stub function found\n");
     } else {
@@ -719,10 +914,10 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
                                                    Section.StubOffset);
 
       // Creating Hi and Lo relocations for the filled stub instructions.
-      RelocationEntry REHi(Rel.SectionID,
+      RelocationEntry REHi(SectionID,
                            StubTargetAddr - Section.Address,
                            ELF::R_MIPS_HI16, Value.Addend);
-      RelocationEntry RELo(Rel.SectionID,
+      RelocationEntry RELo(SectionID,
                            StubTargetAddr - Section.Address + 4,
                            ELF::R_MIPS_LO16, Value.Addend);
 
@@ -734,7 +929,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
         addRelocationForSection(RELo, Value.SectionID);
       }
 
-      resolveRelocation(Section, Rel.Offset,
+      resolveRelocation(Section, Offset,
                         (uint64_t)Section.Address + Section.StubOffset,
                         RelType, 0);
       Section.StubOffset += getMaxStubSize();
@@ -744,8 +939,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
     // A PPC branch relocation will need a stub function if the target is
     // an external symbol (Symbol::ST_Unknown) or if the target address
     // is not within the signed 24-bits branch address.
-    SectionEntry &Section = Sections[Rel.SectionID];
-    uint8_t *Target = Section.Address + Rel.Offset;
+    SectionEntry &Section = Sections[SectionID];
+    uint8_t *Target = Section.Address + Offset;
     bool RangeOverflow = false;
     if (SymType != SymbolRef::ST_Unknown) {
       // A function call may points to the .opd entry, so the final symbol value
@@ -755,7 +950,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
       int32_t delta = static_cast<int32_t>(Target - RelocTarget);
       // If it is within 24-bits branch range, just set the branch target
       if (SignExtend32<24>(delta) == delta) {
-        RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+        RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
         if (Value.SymbolName)
           addRelocationForSymbol(RE, Value.SymbolName);
         else
@@ -770,7 +965,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
       StubMap::const_iterator i = Stubs.find(Value);
       if (i != Stubs.end()) {
         // Symbol function stub already created, just relocate to it
-        resolveRelocation(Section, Rel.Offset,
+        resolveRelocation(Section, Offset,
                           (uint64_t)Section.Address + i->second, RelType, 0);
         DEBUG(dbgs() << " Stub function found\n");
       } else {
@@ -779,21 +974,21 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
         Stubs[Value] = Section.StubOffset;
         uint8_t *StubTargetAddr = createStubFunction(Section.Address +
                                                      Section.StubOffset);
-        RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+        RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                            ELF::R_PPC64_ADDR64, Value.Addend);
 
         // Generates the 64-bits address loads as exemplified in section
         // 4.5.1 in PPC64 ELF ABI.
@@ -744,8 +939,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
     // A PPC branch relocation will need a stub function if the target is
     // an external symbol (Symbol::ST_Unknown) or if the target address
     // is not within the signed 24-bit branch address range.
-    SectionEntry &Section = Sections[Rel.SectionID];
-    uint8_t *Target = Section.Address + Rel.Offset;
+    SectionEntry &Section = Sections[SectionID];
+    uint8_t *Target = Section.Address + Offset;
     bool RangeOverflow = false;
     if (SymType != SymbolRef::ST_Unknown) {
       // A function call may point to the .opd entry, so the final symbol
       // value is calculated based on the relocation values in the .opd section.
       findOPDEntrySection(Obj, ObjSectionToID, Value);
       uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
       int32_t delta = static_cast<int32_t>(Target - RelocTarget);
       // If it is within 24-bit branch range, just set the branch target.
       if (SignExtend32<24>(delta) == delta) {
-        RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+        RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
         if (Value.SymbolName)
           addRelocationForSymbol(RE, Value.SymbolName);
         else
@@ -770,7 +965,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
       StubMap::const_iterator i = Stubs.find(Value);
       if (i != Stubs.end()) {
         // Symbol function stub already created, just relocate to it.
-        resolveRelocation(Section, Rel.Offset,
+        resolveRelocation(Section, Offset,
                           (uint64_t)Section.Address + i->second, RelType, 0);
         DEBUG(dbgs() << " Stub function found\n");
       } else {
@@ -779,21 +974,21 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
         Stubs[Value] = Section.StubOffset;
         uint8_t *StubTargetAddr = createStubFunction(Section.Address +
                                                      Section.StubOffset);
-        RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+        RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                            ELF::R_PPC64_ADDR64, Value.Addend);
 
         // Generates the 64-bit address load sequence exemplified in section
         // 4.5.1 of the PPC64 ELF ABI.
-        RelocationEntry REhst(Rel.SectionID,
+        RelocationEntry REhst(SectionID,
                               StubTargetAddr - Section.Address + 2,
                               ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
-        RelocationEntry REhr(Rel.SectionID,
+        RelocationEntry REhr(SectionID,
                              StubTargetAddr - Section.Address + 6,
                              ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
-        RelocationEntry REh(Rel.SectionID,
+        RelocationEntry REh(SectionID,
                             StubTargetAddr - Section.Address + 14,
                             ELF::R_PPC64_ADDR16_HI, Value.Addend);
-        RelocationEntry REl(Rel.SectionID,
+        RelocationEntry REl(SectionID,
                             StubTargetAddr - Section.Address + 18,
                             ELF::R_PPC64_ADDR16_LO, Value.Addend);
@@ -809,7 +1004,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
           addRelocationForSection(REl, Value.SectionID);
         }
 
-        resolveRelocation(Section, Rel.Offset,
+        resolveRelocation(Section, Offset,
                           (uint64_t)Section.Address + Section.StubOffset,
                           RelType, 0);
         if (SymType == SymbolRef::ST_Unknown)
@@ -819,7 +1014,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
         }
       }
     } else {
-      RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+      RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
       // Extra check to avoid relocation against empty symbols (usually
       // the R_PPC64_TOC).
       if (Value.SymbolName && !TargetName.empty())
@@ -827,8 +1022,55 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
       else
         addRelocationForSection(RE, Value.SectionID);
     }
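
The byte offsets 2, 6, 14 and 18 used for REhst/REhr/REh/REl follow from PPC64 being big-endian: each 16-bit immediate occupies the low halfword (bytes 2-3) of its 4-byte instruction, so the relocations land two bytes into the words at 0, 4, 12 and 16. An informal sketch of the address-load portion of the stub those entries patch (mnemonics per the ABI's 4.5.1 sequence; the exact bytes come from createStubFunction):

    // +0   lis   r12, <highest>     ; halfword at +2  <- R_PPC64_ADDR16_HIGHEST
    // +4   ori   r12, r12, <higher> ; halfword at +6  <- R_PPC64_ADDR16_HIGHER
    // +8   sldi  r12, r12, 32       ; no relocation
    // +12  oris  r12, r12, <hi>     ; halfword at +14 <- R_PPC64_ADDR16_HI
    // +16  ori   r12, r12, <lo>     ; halfword at +18 <- R_PPC64_ADDR16_LO

The loaded value is the address of the function descriptor (the .opd entry mentioned above); the remainder of the stub, not sketched here, dereferences it for the entry point and TOC and branches indirectly, which is consistent with getMaxStubSize() reserving 44 bytes for ppc64.
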
+  } else if (Arch == Triple::systemz &&
+             (RelType == ELF::R_390_PLT32DBL ||
+              RelType == ELF::R_390_GOTENT)) {
+    // Create function stubs for both PLT and GOT references, regardless of
+    // whether the GOT reference is to data or code.  The stub contains the
+    // full address of the symbol, as needed by GOT references, and the
+    // executable part only adds an overhead of 8 bytes.
+    //
+    // We could try to conserve space by allocating the code and data
+    // parts of the stub separately.  However, as things stand, we allocate
+    // a stub for every relocation, so using a GOT in JIT code should be
+    // no less space efficient than using an explicit constant pool.
+    DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+    SectionEntry &Section = Sections[SectionID];
+
+    // Look for an existing stub.
+    StubMap::const_iterator i = Stubs.find(Value);
+    uintptr_t StubAddress;
+    if (i != Stubs.end()) {
+      StubAddress = uintptr_t(Section.Address) + i->second;
+      DEBUG(dbgs() << " Stub function found\n");
+    } else {
+      // Create a new stub function.
+      DEBUG(dbgs() << " Create a new stub function\n");
+
+      uintptr_t BaseAddress = uintptr_t(Section.Address);
+      uintptr_t StubAlignment = getStubAlignment();
+      StubAddress = (BaseAddress + Section.StubOffset +
+                     StubAlignment - 1) & -StubAlignment;
+      unsigned StubOffset = StubAddress - BaseAddress;
+
+      Stubs[Value] = StubOffset;
+      createStubFunction((uint8_t *)StubAddress);
+      RelocationEntry RE(SectionID, StubOffset + 8,
+                         ELF::R_390_64, Value.Addend - Addend);
+      if (Value.SymbolName)
+        addRelocationForSymbol(RE, Value.SymbolName);
+      else
+        addRelocationForSection(RE, Value.SectionID);
+      Section.StubOffset = StubOffset + getMaxStubSize();
+    }
+
+    if (RelType == ELF::R_390_GOTENT)
+      resolveRelocation(Section, Offset, StubAddress + 8,
+                        ELF::R_390_PC32DBL, Addend);
+    else
+      resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
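
Two details above deserve a note. The stub address is first rounded up to getStubAlignment() -- 8 on SystemZ -- with the standard align-up idiom, and the stub's 8-byte data slot sits at offset +8, which is why the R_390_64 entry targets StubOffset + 8 and why an R_390_GOTENT site is resolved as a PC32DBL reference to StubAddress + 8. A self-contained check of the align-up arithmetic (addresses hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t BaseAddress   = 0x1000; // hypothetical Section.Address
      uintptr_t StubOffset    = 0x65;   // hypothetical Section.StubOffset
      uintptr_t StubAlignment = 8;      // getStubAlignment() on SystemZ

      // (x + a - 1) & -a rounds x up to the next multiple of a (a power of
      // two): -a is the two's-complement mask clearing the low log2(a) bits.
      uintptr_t StubAddress = (BaseAddress + StubOffset +
                               StubAlignment - 1) & -StubAlignment;
      assert(StubAddress == 0x1068 && StubAddress % StubAlignment == 0);
      return 0;
    }
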
   } else {
-    RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+    RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
     if (Value.SymbolName)
       addRelocationForSymbol(RE, Value.SymbolName);
     else
@@ -836,13 +1078,6 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
   }
 }
 
-unsigned RuntimeDyldELF::getCommonSymbolAlignment(const SymbolRef &Sym) {
-  // In ELF, the value of an SHN_COMMON symbol is its alignment requirement.
-  uint64_t Align;
-  Check(Sym.getValue(Align));
-  return Align;
-}
-
 bool RuntimeDyldELF::isCompatibleFormat(const ObjectBuffer *Buffer) const {
   if (Buffer->getBufferSize() < strlen(ELF::ElfMagic))
     return false;
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index 07e704b..794c7ec 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -31,7 +31,12 @@ namespace {
 } // end anonymous namespace
 
 class RuntimeDyldELF : public RuntimeDyldImpl {
-protected:
+  void resolveRelocation(const SectionEntry &Section,
+                         uint64_t Offset,
+                         uint64_t Value,
+                         uint32_t Type,
+                         int64_t Addend);
+
   void resolveX86_64Relocation(const SectionEntry &Section,
                                uint64_t Offset,
                                uint64_t Value,
@@ -44,6 +49,12 @@ protected:
                             uint32_t Type,
                             int32_t Addend);
 
+  void resolveAArch64Relocation(const SectionEntry &Section,
+                                uint64_t Offset,
+                                uint64_t Value,
+                                uint32_t Type,
+                                int64_t Addend);
+
   void resolveARMRelocation(const SectionEntry &Section,
                             uint64_t Offset,
                             uint32_t Value,
@@ -62,21 +73,11 @@ protected:
                             uint32_t Type,
                             int64_t Addend);
 
-  virtual void resolveRelocation(const SectionEntry &Section,
-                                 uint64_t Offset,
-                                 uint64_t Value,
-                                 uint32_t Type,
-                                 int64_t Addend);
-
-  virtual void processRelocationRef(const ObjRelocationInfo &Rel,
-                                    ObjectImage &Obj,
-                                    ObjSectionToIDMap &ObjSectionToID,
-                                    const SymbolTableMap &Symbols,
-                                    StubMap &Stubs);
-
-  unsigned getCommonSymbolAlignment(const SymbolRef &Sym);
-
-  virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+  void resolveSystemZRelocation(const SectionEntry &Section,
+                                uint64_t Offset,
+                                uint64_t Value,
+                                uint32_t Type,
+                                int64_t Addend);
 
   uint64_t findPPC64TOC() const;
   void findOPDEntrySection(ObjectImage &Obj,
@@ -84,12 +85,19 @@ protected:
                            RelocationValueRef &Rel);
 
 public:
-  RuntimeDyldELF(RTDyldMemoryManager *mm)
-    : RuntimeDyldImpl(mm) {}
+  RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
 
+  virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value);
+  virtual void processRelocationRef(unsigned SectionID,
+                                    RelocationRef RelI,
+                                    ObjectImage &Obj,
+                                    ObjSectionToIDMap &ObjSectionToID,
+                                    const SymbolTableMap &Symbols,
+                                    StubMap &Stubs);
+  virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
+  virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer);
+  virtual StringRef getEHFrameSection();
   virtual ~RuntimeDyldELF();
-
-  bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
 };
 
 } // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index f100994..383ffab 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -49,7 +49,7 @@ public:
   /// Address - address in the linker's memory where the section resides.
   uint8_t *Address;
 
-  /// Size - section size.
+  /// Size - section size.  Doesn't include the stubs.
   size_t Size;
 
   /// LoadAddress - the address of the section in the target process's memory.
@@ -67,9 +67,9 @@ public:
   uintptr_t ObjAddress;
 
   SectionEntry(StringRef name, uint8_t *address, size_t size,
-               uintptr_t stubOffset, uintptr_t objAddress)
+               uintptr_t objAddress)
     : Name(name), Address(address), Size(size), LoadAddress((uintptr_t)address),
-      StubOffset(stubOffset), ObjAddress(objAddress) {}
+      StubOffset(size), ObjAddress(objAddress) {}
 };
 
 /// RelocationEntry - used to represent relocations internally in the dynamic
@@ -89,20 +89,20 @@ public:
   /// used to make a relocation section relative instead of symbol relative.
   intptr_t Addend;
 
+  /// True if this is a PCRel relocation (MachO specific).
+  bool IsPCRel;
+
+  /// The size of this relocation (MachO specific).
+  unsigned Size;
+
   RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
-    : SectionID(id), Offset(offset), RelType(type), Addend(addend) {}
-};
+    : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+      IsPCRel(false), Size(0) {}
 
-/// ObjRelocationInfo - relocation information as read from the object file.
-/// Used to pass around data taken from object::RelocationRef, together with
-/// the section to which the relocation points (represented by a SectionID).
-class ObjRelocationInfo {
-public:
-  unsigned SectionID;
-  uint64_t Offset;
-  SymbolRef Symbol;
-  uint64_t Type;
-  int64_t AdditionalInfo;
+  RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+                  bool IsPCRel, unsigned Size)
+    : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+      IsPCRel(IsPCRel), Size(Size) {}
 };
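
Dropping the stubOffset constructor argument makes the layout invariant explicit: stubs are carved from the same allocation, starting exactly where the section's real contents end, and Size deliberately excludes them. A small model of that invariant (SectionModel and allocateStub are illustrative stand-ins, not LLVM API):

    #include <cstddef>
    #include <cstdint>

    // Stand-in for SectionEntry: StubOffset now starts at Size, so the
    // first stub is placed immediately past the section's real contents.
    struct SectionModel {
      uint8_t  *Address;
      size_t    Size;       // real contents only; stubs excluded
      uintptr_t StubOffset; // == Size right after construction
    };

    static uint8_t *allocateStub(SectionModel &S, unsigned MaxStubSize) {
      uint8_t *Stub = S.Address + S.StubOffset; // first call: Address + Size
      S.StubOffset += MaxStubSize;              // mirrors "StubOffset += getMaxStubSize()"
      return Stub;
    }
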
 
 class RelocationValueRef {
@@ -166,16 +166,29 @@ protected:
   Triple::ArchType Arch;
 
   inline unsigned getMaxStubSize() {
+    if (Arch == Triple::aarch64)
+      return 20; // movz; movk; movk; movk; br
     if (Arch == Triple::arm || Arch == Triple::thumb)
       return 8; // 32-bit instruction and 32-bit address
     else if (Arch == Triple::mipsel || Arch == Triple::mips)
       return 16;
     else if (Arch == Triple::ppc64)
       return 44;
+    else if (Arch == Triple::x86_64)
+      return 8; // GOT
+    else if (Arch == Triple::systemz)
+      return 16;
     else
       return 0;
   }
 
+  inline unsigned getStubAlignment() {
+    if (Arch == Triple::systemz)
+      return 8;
+    else
+      return 1;
+  }
+
   bool HasError;
   std::string ErrorStr;
 
@@ -194,22 +207,15 @@ protected:
     return (uint8_t*)Sections[SectionID].Address;
   }
 
-  // Subclasses can override this method to get the alignment requirement of
-  // a common symbol. Returns no alignment requirement if not implemented.
-  virtual unsigned getCommonSymbolAlignment(const SymbolRef &Sym) {
-    return 0;
-  }
-
   void writeInt16BE(uint8_t *Addr, uint16_t Value) {
-    if (sys::isLittleEndianHost())
+    if (sys::IsLittleEndianHost)
       Value = sys::SwapByteOrder(Value);
     *Addr     = (Value >> 8) & 0xFF;
     *(Addr+1) = Value & 0xFF;
   }
 
   void writeInt32BE(uint8_t *Addr, uint32_t Value) {
-    if (sys::isLittleEndianHost())
+    if (sys::IsLittleEndianHost)
       Value = sys::SwapByteOrder(Value);
     *Addr     = (Value >> 24) & 0xFF;
     *(Addr+1) = (Value >> 16) & 0xFF;
@@ -218,7 +224,7 @@ protected:
   }
 
   void writeInt64BE(uint8_t *Addr, uint64_t Value) {
-    if (sys::isLittleEndianHost())
+    if (sys::IsLittleEndianHost)
       Value = sys::SwapByteOrder(Value);
     *Addr     = (Value >> 56) & 0xFF;
     *(Addr+1) = (Value >> 48) & 0xFF;
@@ -269,24 +275,16 @@ protected:
   /// \brief Resolves relocations from Relocs list with address from Value.
   void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
-  void resolveRelocationEntry(const RelocationEntry &RE, uint64_t Value);
 
   /// \brief An object file specific relocation resolver
-  /// \param Section The section where the relocation is being applied
-  /// \param Offset The offset into the section for this relocation
+  /// \param RE The relocation to be resolved
   /// \param Value Target symbol address to apply the relocation action
-  /// \param Type object file specific relocation type
-  /// \param Addend A constant addend used to compute the value to be stored
-  ///        into the relocatable field
-  virtual void resolveRelocation(const SectionEntry &Section,
-                                 uint64_t Offset,
-                                 uint64_t Value,
-                                 uint32_t Type,
-                                 int64_t Addend) = 0;
+  virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
 
   /// \brief Parses the object file relocation and stores it to Relocations
   /// or SymbolRelocations (this depends on the object file type).
-  virtual void processRelocationRef(const ObjRelocationInfo &Rel,
+  virtual void processRelocationRef(unsigned SectionID,
+                                    RelocationRef RelI,
                                     ObjectImage &Obj,
                                     ObjSectionToIDMap &ObjSectionToID,
                                     const SymbolTableMap &Symbols,
@@ -336,6 +334,8 @@ public:
   StringRef getErrorString() { return ErrorStr; }
 
   virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const = 0;
+
+  virtual StringRef getEHFrameSection();
 };
 
 } // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index bcc3df1..01a3fd9 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -21,16 +21,87 @@ using namespace llvm::object;
 
 namespace llvm {
 
+static unsigned char *processFDE(unsigned char *P, intptr_t DeltaForText,
+                                 intptr_t DeltaForEH) {
+  uint32_t Length = *((uint32_t*)P);
+  P += 4;
+  unsigned char *Ret = P + Length;
+  uint32_t Offset = *((uint32_t*)P);
+  if (Offset == 0) // is a CIE
+    return Ret;
+
+  P += 4;
+  intptr_t FDELocation = *((intptr_t*)P);
+  intptr_t NewLocation = FDELocation - DeltaForText;
+  *((intptr_t*)P) = NewLocation;
+  P += sizeof(intptr_t);
+
+  // Skip the FDE address range
+  P += sizeof(intptr_t);
+
+  uint8_t Augmentationsize = *P;
+  P += 1;
+  if (Augmentationsize != 0) {
+    intptr_t LSDA = *((intptr_t*)P);
+    intptr_t NewLSDA = LSDA - DeltaForEH;
+    *((intptr_t*)P) = NewLSDA;
+  }
+
+  return Ret;
+}
+
+static intptr_t computeDelta(SectionEntry *A, SectionEntry *B) {
+  intptr_t ObjDistance = A->ObjAddress - B->ObjAddress;
+  intptr_t MemDistance = A->LoadAddress - B->LoadAddress;
+  return ObjDistance - MemDistance;
+}
+
+StringRef RuntimeDyldMachO::getEHFrameSection() {
+  SectionEntry *Text = NULL;
+  SectionEntry *EHFrame = NULL;
+  SectionEntry *ExceptTab = NULL;
+  for (int i = 0, e = Sections.size(); i != e; ++i) {
+    if (Sections[i].Name == "__eh_frame")
+      EHFrame = &Sections[i];
+    else if (Sections[i].Name == "__text")
+      Text = &Sections[i];
+    else if (Sections[i].Name == "__gcc_except_tab")
+      ExceptTab = &Sections[i];
+  }
+  if (Text == NULL || EHFrame == NULL)
+    return StringRef();
+
+  intptr_t DeltaForText = computeDelta(Text, EHFrame);
+  intptr_t DeltaForEH = 0;
+  if (ExceptTab)
+    DeltaForEH = computeDelta(ExceptTab, EHFrame);
+
+  unsigned char *P = EHFrame->Address;
+  unsigned char *End = P + EHFrame->Size;
+  do {
+    P = processFDE(P, DeltaForText, DeltaForEH);
+  } while(P != End);
+
+  return StringRef((char*)EHFrame->Address, EHFrame->Size);
+}
+
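getEHFrameSection walks __eh_frame one record at a time: processFDE reads the 4-byte length, treats a record whose ID word is zero as a CIE and skips it, and otherwise rewrites the FDE's pc-begin (and, when an augmentation is present, its LSDA pointer) by the computed delta. The delta re-expresses a pointer that was valid in the object-file layout as one valid in the loaded layout; a worked check of the arithmetic (all addresses hypothetical):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical object-file vs. loaded placements of the two sections:
      intptr_t TextObj = 0x1000, TextLoad = 0x11000;
      intptr_t EHObj   = 0x2000, EHLoad   = 0x13000;

      // computeDelta(Text, EHFrame): object-layout distance minus
      // loaded-layout distance between the two sections.
      intptr_t DeltaForText = (TextObj - EHObj) - (TextLoad - EHLoad); // 0x1000

      // A pc-relative pc-begin for a function at __text+0x40, stored in a
      // field at __eh_frame+0x20, as it was correct in the object file:
      intptr_t OldPCBegin = (TextObj + 0x40) - (EHObj + 0x20);

      // processFDE applies "NewLocation = FDELocation - DeltaForText",
      // which yields the value that is correct for the loaded layout:
      intptr_t NewPCBegin = OldPCBegin - DeltaForText;
      assert(NewPCBegin == (TextLoad + 0x40) - (EHLoad + 0x20));
      return 0;
    }
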
+void RuntimeDyldMachO::resolveRelocation(const RelocationEntry &RE,
+                                         uint64_t Value) {
+  const SectionEntry &Section = Sections[RE.SectionID];
+  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+                           RE.IsPCRel, RE.Size);
+}
+
 void RuntimeDyldMachO::resolveRelocation(const SectionEntry &Section,
                                          uint64_t Offset,
                                          uint64_t Value,
                                          uint32_t Type,
-                                         int64_t Addend) {
+                                         int64_t Addend,
+                                         bool isPCRel,
+                                         unsigned LogSize) {
   uint8_t *LocalAddress = Section.Address + Offset;
   uint64_t FinalAddress = Section.LoadAddress + Offset;
-  bool isPCRel = (Type >> 24) & 1;
-  unsigned MachoType = (Type >> 28) & 0xf;
-  unsigned Size = 1 << ((Type >> 25) & 3);
+  unsigned MachoType = Type;
+  unsigned Size = 1 << LogSize;
 
   DEBUG(dbgs() << "resolveRelocation LocalAddress: "
         << format("%p", LocalAddress)
@@ -205,89 +276,111 @@ bool RuntimeDyldMachO::resolveARMRelocation(uint8_t *LocalAddress,
   return false;
 }
 
-void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
+void RuntimeDyldMachO::processRelocationRef(unsigned SectionID,
+                                            RelocationRef RelI,
                                             ObjectImage &Obj,
                                             ObjSectionToIDMap &ObjSectionToID,
                                             const SymbolTableMap &Symbols,
                                             StubMap &Stubs) {
+  const ObjectFile *OF = Obj.getObjectFile();
+  const MachOObjectFile *MachO = static_cast<const MachOObjectFile*>(OF);
+  macho::RelocationEntry RE = MachO->getRelocation(RelI.getRawDataRefImpl());
 
-  uint32_t RelType = (uint32_t) (Rel.Type & 0xffffffffL);
+  uint32_t RelType = MachO->getAnyRelocationType(RE);
   RelocationValueRef Value;
-  SectionEntry &Section = Sections[Rel.SectionID];
+  SectionEntry &Section = Sections[SectionID];
+
+  bool isExtern = MachO->getPlainRelocationExternal(RE);
+  bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+  unsigned Size = MachO->getAnyRelocationLength(RE);
+  uint64_t Offset;
+  RelI.getOffset(Offset);
+  uint8_t *LocalAddress = Section.Address + Offset;
+  unsigned NumBytes = 1 << Size;
+  uint64_t Addend = 0;
+  memcpy(&Addend, LocalAddress, NumBytes);
 
-  bool isExtern = (RelType >> 27) & 1;
   if (isExtern) {
     // Obtain the symbol name which is referenced in the relocation
+    SymbolRef Symbol;
+    RelI.getSymbol(Symbol);
     StringRef TargetName;
-    const SymbolRef &Symbol = Rel.Symbol;
     Symbol.getName(TargetName);
     // First search for the symbol in the local symbol table
     SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data());
     if (lsi != Symbols.end()) {
       Value.SectionID = lsi->second.first;
-      Value.Addend = lsi->second.second;
+      Value.Addend = lsi->second.second + Addend;
     } else {
       // Search for the symbol in the global symbol table
       SymbolTableMap::const_iterator gsi =
         GlobalSymbolTable.find(TargetName.data());
       if (gsi != GlobalSymbolTable.end()) {
         Value.SectionID = gsi->second.first;
-        Value.Addend = gsi->second.second;
-      } else
+        Value.Addend = gsi->second.second + Addend;
+      } else {
         Value.SymbolName = TargetName.data();
+        Value.Addend = Addend;
+      }
     }
   } else {
-    error_code err;
-    uint8_t sectionIndex = static_cast<uint8_t>(RelType & 0xFF);
-    section_iterator si = Obj.begin_sections(),
-                     se = Obj.end_sections();
-    for (uint8_t i = 1; i < sectionIndex; i++) {
-      error_code err;
-      si.increment(err);
-      if (si == se)
-        break;
-    }
-    assert(si != se && "No section containing relocation!");
-    Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID);
-    Value.Addend = 0;
-    // FIXME: The size and type of the relocation determines if we can
-    // encode an Addend in the target location itself, and if so, how many
-    // bytes we should read in order to get it. We don't yet support doing
-    // that, and just assuming it's sizeof(intptr_t) is blatantly wrong.
-    //Value.Addend = *(const intptr_t *)Target;
-    if (Value.Addend) {
-      // The MachO addend is an offset from the current section.  We need it
-      // to be an offset from the destination section
-      Value.Addend += Section.ObjAddress - Sections[Value.SectionID].ObjAddress;
-    }
+    SectionRef Sec = MachO->getRelocationSection(RE);
+    Value.SectionID = findOrEmitSection(Obj, Sec, true, ObjSectionToID);
+    uint64_t Addr;
+    Sec.getAddress(Addr);
+    Value.Addend = Addend - Addr;
   }
 
-  if (Arch == Triple::arm && (RelType & 0xf) == macho::RIT_ARM_Branch24Bit) {
+  if (Arch == Triple::x86_64 && RelType == macho::RIT_X86_64_GOT) {
+    assert(IsPCRel);
+    assert(Size == 2);
+    StubMap::const_iterator i = Stubs.find(Value);
+    uint8_t *Addr;
+    if (i != Stubs.end()) {
+      Addr = Section.Address + i->second;
+    } else {
+      Stubs[Value] = Section.StubOffset;
+      uint8_t *GOTEntry = Section.Address + Section.StubOffset;
+      RelocationEntry RE(SectionID, Section.StubOffset,
+                         macho::RIT_X86_64_Unsigned, Value.Addend - 4, false,
+                         3);
+      if (Value.SymbolName)
+        addRelocationForSymbol(RE, Value.SymbolName);
+      else
+        addRelocationForSection(RE, Value.SectionID);
+      Section.StubOffset += 8;
+      Addr = GOTEntry;
+    }
+    resolveRelocation(Section, Offset, (uint64_t)Addr,
+                      macho::RIT_X86_64_Unsigned, 4, true, 2);
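
For RIT_X86_64_GOT the dynamic loader synthesizes its own GOT out of the section's stub area: the 8-byte "stub" is just a pointer slot, filled with the target's absolute address when the RIT_X86_64_Unsigned entry above is resolved, while the final resolveRelocation call patches the use site with a 32-bit PC-relative displacement to that slot. A toy model of the two writes (names and the rip-relative bias handling are illustrative, not the library's API):

    #include <cstdint>
    #include <cstring>

    // The slot gets the symbol's absolute address (the 8-byte unsigned
    // write); the use site gets a 4-byte pc-relative displacement to it.
    static void emitGOTIndirection(uint8_t *SlotMem, uint64_t SlotLoadAddr,
                                   uint64_t SymbolAddr,
                                   uint8_t *FieldMem, uint64_t FieldLoadAddr) {
      std::memcpy(SlotMem, &SymbolAddr, sizeof(SymbolAddr));
      // x86-64 rip-relative displacements count from the end of the 4-byte
      // field, hence the +4 bias.
      int32_t Disp = static_cast<int32_t>(SlotLoadAddr - (FieldLoadAddr + 4));
      std::memcpy(FieldMem, &Disp, sizeof(Disp));
    }
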
+  } else if (Arch == Triple::arm &&
+             (RelType & 0xf) == macho::RIT_ARM_Branch24Bit) {
     // This is an ARM branch relocation, need to use a stub function.
 
     // Look for an existing stub.
     StubMap::const_iterator i = Stubs.find(Value);
     if (i != Stubs.end())
-      resolveRelocation(Section, Rel.Offset,
+      resolveRelocation(Section, Offset,
                         (uint64_t)Section.Address + i->second,
-                        RelType, 0);
+                        RelType, 0, IsPCRel, Size);
     else {
       // Create a new stub function.
       Stubs[Value] = Section.StubOffset;
       uint8_t *StubTargetAddr = createStubFunction(Section.Address +
                                                    Section.StubOffset);
-      RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
+      RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
                          macho::RIT_Vanilla, Value.Addend);
       if (Value.SymbolName)
         addRelocationForSymbol(RE, Value.SymbolName);
       else
         addRelocationForSection(RE, Value.SectionID);
-      resolveRelocation(Section, Rel.Offset,
+      resolveRelocation(Section, Offset,
                         (uint64_t)Section.Address + Section.StubOffset,
-                        RelType, 0);
+                        RelType, 0, IsPCRel, Size);
       Section.StubOffset += getMaxStubSize();
     }
   } else {
-    RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
+    RelocationEntry RE(SectionID, Offset, RelType, Value.Addend,
+                       IsPCRel, Size);
     if (Value.SymbolName)
       addRelocationForSymbol(RE, Value.SymbolName);
     else
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
index 62d8487..df8d3bb 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -16,7 +16,7 @@
 
 #include "RuntimeDyldImpl.h"
 #include "llvm/ADT/IndexedMap.h"
-#include "llvm/Object/MachOObject.h"
+#include "llvm/Object/MachO.h"
 #include "llvm/Support/Format.h"
 
 using namespace llvm;
@@ -25,7 +25,6 @@ using namespace llvm::object;
 namespace llvm {
 
 class RuntimeDyldMachO : public RuntimeDyldImpl {
-protected:
   bool resolveI386Relocation(uint8_t *LocalAddress,
                              uint64_t FinalAddress,
                              uint64_t Value,
@@ -48,22 +47,25 @@ protected:
                             unsigned Size,
                             int64_t Addend);
 
-  virtual void processRelocationRef(const ObjRelocationInfo &Rel,
+  void resolveRelocation(const SectionEntry &Section,
+                         uint64_t Offset,
+                         uint64_t Value,
+                         uint32_t Type,
+                         int64_t Addend,
+                         bool isPCRel,
+                         unsigned Size);
+public:
+  RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
+
+  virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value);
+  virtual void processRelocationRef(unsigned SectionID,
+                                    RelocationRef RelI,
                                     ObjectImage &Obj,
                                     ObjSectionToIDMap &ObjSectionToID,
                                     const SymbolTableMap &Symbols,
                                     StubMap &Stubs);
-
-public:
-  virtual void resolveRelocation(const SectionEntry &Section,
-                                 uint64_t Offset,
-                                 uint64_t Value,
-                                 uint32_t Type,
-                                 int64_t Addend);
-
-  RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
-
-  bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
+  virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const;
+  virtual StringRef getEHFrameSection();
 };
 
 } // end namespace llvm