author    <ed@FreeBSD.org>  2009-06-02 17:52:33 +0000
committer <ed@FreeBSD.org>  2009-06-02 17:52:33 +0000
commit 3277b69d734b9c90b44ebde4ede005717e2c3b2e (patch)
tree   64ba909838c23261cace781ece27d106134ea451 /lib/ExecutionEngine/JIT
Import LLVM, at r72732.
Diffstat (limited to 'lib/ExecutionEngine/JIT')
-rw-r--r-- lib/ExecutionEngine/JIT/CMakeLists.txt       11
-rw-r--r-- lib/ExecutionEngine/JIT/Intercept.cpp       148
-rw-r--r-- lib/ExecutionEngine/JIT/JIT.cpp             708
-rw-r--r-- lib/ExecutionEngine/JIT/JIT.h               176
-rw-r--r-- lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp 1056
-rw-r--r-- lib/ExecutionEngine/JIT/JITDwarfEmitter.h     87
-rw-r--r-- lib/ExecutionEngine/JIT/JITEmitter.cpp      1615
-rw-r--r-- lib/ExecutionEngine/JIT/JITMemoryManager.cpp 541
-rw-r--r-- lib/ExecutionEngine/JIT/Makefile              37
-rw-r--r-- lib/ExecutionEngine/JIT/TargetSelect.cpp      83
10 files changed, 4462 insertions, 0 deletions
diff --git a/lib/ExecutionEngine/JIT/CMakeLists.txt b/lib/ExecutionEngine/JIT/CMakeLists.txt
new file mode 100644
index 0000000..d7980d0
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/CMakeLists.txt
@@ -0,0 +1,11 @@
+# TODO: Support other architectures. See Makefile.
+add_definitions(-DENABLE_X86_JIT)
+
+add_partially_linked_object(LLVMJIT
+ Intercept.cpp
+ JIT.cpp
+ JITDwarfEmitter.cpp
+ JITEmitter.cpp
+ JITMemoryManager.cpp
+ TargetSelect.cpp
+ )
diff --git a/lib/ExecutionEngine/JIT/Intercept.cpp b/lib/ExecutionEngine/JIT/Intercept.cpp
new file mode 100644
index 0000000..3dcc462
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/Intercept.cpp
@@ -0,0 +1,148 @@
+//===-- Intercept.cpp - System function interception routines -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// When a call to an external function occurs, the JIT is designed to use the
+// dynamic loader interface to find a function to call.  This is useful for
+// resolving system calls and library functions that are not available within
+// LLVM.  Some system calls, however, need to be handled specially.  For this
+// reason, we intercept some of them here and use our own stubs to handle them.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Config/config.h"
+using namespace llvm;
+
+// AtExitHandlers - List of functions to call when the program exits,
+// registered with the atexit() library function.
+static std::vector<void (*)()> AtExitHandlers;
+
+/// runAtExitHandlers - Run any functions registered by the program's
+/// calls to atexit(3), which we intercept and store in
+/// AtExitHandlers.
+///
+static void runAtExitHandlers() {
+ while (!AtExitHandlers.empty()) {
+ void (*Fn)() = AtExitHandlers.back();
+ AtExitHandlers.pop_back();
+ Fn();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Function stubs that are invoked instead of certain library calls
+//===----------------------------------------------------------------------===//
+
+// Force the following functions to be linked in to anything that uses the
+// JIT. This is a hack designed to work around the all-too-clever Glibc
+// strategy of making these functions work differently when inlined vs. when
+// not inlined, and hiding their real definitions in a separate archive file
+// that the dynamic linker can't see. For more info, search for
+// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+#if defined(__linux__)
+#if defined(HAVE_SYS_STAT_H)
+#include <sys/stat.h>
+#endif
+#include <fcntl.h>
+/* The stat functions are redirected to __xstat with a version number.  On
+ * x86-64, linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make
+ * 'stat' available as an exported symbol, so we have to add it explicitly.
+ */
+class StatSymbols {
+public:
+ StatSymbols() {
+ sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat);
+ sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat);
+ sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat);
+ sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64);
+ sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64);
+ sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64);
+ sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64);
+ sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit);
+ sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod);
+ }
+};
+static StatSymbols initStatSymbols;
+#endif // __linux__
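+
+// Note (an illustrative sketch, not additional interception logic):
+// AddSymbol() records a name/address pair in DynamicLibrary's explicit symbol
+// table, which SearchForAddressOfSymbol() consults before asking the dynamic
+// linker, e.g.:
+//
+//   sys::DynamicLibrary::AddSymbol("my_helper", (void*)(intptr_t)&MyHelper);
+//   void *P = sys::DynamicLibrary::SearchForAddressOfSymbol("my_helper");
+//   // P == &MyHelper, even if my_helper is not exported by any library.
+//
+// Here "my_helper"/MyHelper are hypothetical names used for illustration.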
+
+// jit_exit - Used to intercept the "exit" library call.
+static void jit_exit(int Status) {
+ runAtExitHandlers(); // Run atexit handlers...
+ exit(Status);
+}
+
+// jit_atexit - Used to intercept the "atexit" library call.
+static int jit_atexit(void (*Fn)(void)) {
+ AtExitHandlers.push_back(Fn); // Take note of atexit handler...
+ return 0; // Always successful
+}
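+
+// Interception flow, e.g. (a sketch of JITed guest code; sayGoodbye is a
+// hypothetical void() handler in the JITed program):
+//
+//   atexit(&sayGoodbye);  // resolved to jit_atexit: handler is recorded
+//   exit(0);              // resolved to jit_exit: sayGoodbye() runs first,
+//                         // then the real exit(0) terminates the process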
+
+//===----------------------------------------------------------------------===//
+//
+/// getPointerToNamedFunction - This method returns the address of the specified
+/// function by using the dynamic loader interface. As such it is only useful
+/// for resolving library symbols, not code generated symbols.
+///
+void *JIT::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+ if (!isSymbolSearchingDisabled()) {
+ // Check to see if this is one of the functions we want to intercept. Note,
+ // we cast to intptr_t here to silence a -pedantic warning that complains
+ // about casting a function pointer to a normal pointer.
+ if (Name == "exit") return (void*)(intptr_t)&jit_exit;
+ if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
+
+ const char *NameStr = Name.c_str();
+    // If this is an asm specifier, skip the sentinel.
+ if (NameStr[0] == 1) ++NameStr;
+
+ // If it's an external function, look it up in the process image...
+ void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+ if (Ptr) return Ptr;
+
+ // If it wasn't found and if it starts with an underscore ('_') character,
+ // and has an asm specifier, try again without the underscore.
+ if (Name[0] == 1 && NameStr[0] == '_') {
+ Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+ if (Ptr) return Ptr;
+ }
+
+ // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf. These
+ // are references to hidden visibility symbols that dlsym cannot resolve.
+ // If we have one of these, strip off $LDBLStub and try again.
+#if defined(__APPLE__) && defined(__ppc__)
+ if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
+ memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
+ // First try turning $LDBLStub into $LDBL128. If that fails, strip it off.
+ // This mirrors logic in libSystemStubs.a.
+ std::string Prefix = std::string(Name.begin(), Name.end()-9);
+ if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128", false))
+ return Ptr;
+ if (void *Ptr = getPointerToNamedFunction(Prefix, false))
+ return Ptr;
+ }
+#endif
+ }
+
+ /// If a LazyFunctionCreator is installed, use it to get/create the function.
+ if (LazyFunctionCreator)
+ if (void *RP = LazyFunctionCreator(Name))
+ return RP;
+
+ if (AbortOnFailure) {
+ cerr << "ERROR: Program used external function '" << Name
+ << "' which could not be resolved!\n";
+ abort();
+ }
+ return 0;
+}
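+
+// Resolution order above, summarized with a hypothetical lookup: "printf" is
+// not an intercepted name and has no "\x01" sentinel, so it is found in the
+// process image by SearchForAddressOfSymbol.  A name like "\x01_foo" that
+// misses there is retried as "foo" (sentinel skipped, then leading underscore
+// stripped).  Anything still missing falls through to the
+// LazyFunctionCreator, and finally aborts (or returns null when
+// AbortOnFailure is false).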
diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp
new file mode 100644
index 0000000..f8ae884
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JIT.cpp
@@ -0,0 +1,708 @@
+//===-- JIT.cpp - LLVM Just in Time Compiler ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool implements a just-in-time compiler for LLVM, allowing direct
+// execution of LLVM bitcode in an efficient manner.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Instructions.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/CodeGen/MachineCodeInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetJITInfo.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/System/DynamicLibrary.h"
+#include "llvm/Config/config.h"
+
+using namespace llvm;
+
+#ifdef __APPLE__
+// Apple gcc defaults to -fuse-cxa-atexit (i.e. calls __cxa_atexit instead
+// of atexit). It passes the address of linker generated symbol __dso_handle
+// to the function.
+// This configuration change happened at version 5330.
+# include <AvailabilityMacros.h>
+# if defined(MAC_OS_X_VERSION_10_4) && \
+ ((MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_4) || \
+ (MAC_OS_X_VERSION_MIN_REQUIRED == MAC_OS_X_VERSION_10_4 && \
+ __APPLE_CC__ >= 5330))
+# ifndef HAVE___DSO_HANDLE
+# define HAVE___DSO_HANDLE 1
+# endif
+# endif
+#endif
+
+#if HAVE___DSO_HANDLE
+extern void *__dso_handle __attribute__ ((__visibility__ ("hidden")));
+#endif
+
+namespace {
+
+static struct RegisterJIT {
+ RegisterJIT() { JIT::Register(); }
+} JITRegistrator;
+
+}
+
+namespace llvm {
+ void LinkInJIT() {
+ }
+}
+
+
+#if defined(__GNUC__) && !defined(__ARM_EABI__)
+
+// libgcc defines the __register_frame function to dynamically register new
+// dwarf frames for exception handling.  This functionality is not portable
+// across compilers and is only provided by GCC.  We use the __register_frame
+// function here so that code generated by the JIT cooperates with the
+// unwinding runtime of libgcc.  When JITting with exception handling enabled,
+// LLVM generates dwarf frames and registers them with libgcc via
+// __register_frame.
+//
+// The __register_frame function works on Linux.
+//
+// Unfortunately, this functionality appears to have been added to libgcc
+// after darwin's unwinding library was written.  The darwin code overwrites
+// the value updated by __register_frame with a value fetched through
+// "keymgr".  "keymgr" is an obsolete facility that should be rewritten some
+// day.  In the meantime, since "keymgr" is present in every libgcc shipped
+// with apple-gcc, we need a workaround in LLVM that uses "keymgr" to
+// dynamically modify the values of an opaque key used by libgcc to find its
+// dwarf tables.
+
+extern "C" void __register_frame(void*);
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED <= 1050
+# define USE_KEYMGR 1
+#else
+# define USE_KEYMGR 0
+#endif
+
+#if USE_KEYMGR
+
+namespace {
+
+// LibgccObject - This is the structure defined in libgcc. There is no #include
+// provided for this structure, so we also define it here. libgcc calls it
+// "struct object". The structure is undocumented in libgcc.
+struct LibgccObject {
+ void *unused1;
+ void *unused2;
+ void *unused3;
+
+ /// frame - Pointer to the exception table.
+ void *frame;
+
+ /// encoding - The encoding of the object?
+ union {
+ struct {
+ unsigned long sorted : 1;
+ unsigned long from_array : 1;
+ unsigned long mixed_encoding : 1;
+ unsigned long encoding : 8;
+ unsigned long count : 21;
+ } b;
+ size_t i;
+ } encoding;
+
+  /// fde_end - libgcc defines this field only if some macro is defined.  We
+  /// include this field even if it may not be there, to make libgcc happy.
+ char *fde_end;
+
+ /// next - At least we know it's a chained list!
+ struct LibgccObject *next;
+};
+
+// "kemgr" stuff. Apparently, all frame tables are stored there.
+extern "C" void _keymgr_set_and_unlock_processwide_ptr(int, void *);
+extern "C" void *_keymgr_get_and_lock_processwide_ptr(int);
+#define KEYMGR_GCC3_DW2_OBJ_LIST 302 /* Dwarf2 object list */
+
+/// LibgccObjectInfo - libgcc defines this struct as km_object_info. It
+/// probably contains all dwarf tables that are loaded.
+struct LibgccObjectInfo {
+
+ /// seenObjects - LibgccObjects already parsed by the unwinding runtime.
+ ///
+ struct LibgccObject* seenObjects;
+
+ /// unseenObjects - LibgccObjects not parsed yet by the unwinding runtime.
+ ///
+ struct LibgccObject* unseenObjects;
+
+ unsigned unused[2];
+};
+
+/// DarwinRegisterFrame - Since __register_frame does not work with darwin's
+/// libgcc, we provide our own function, which "tricks" libgcc by modifying
+/// the "Dwarf2 object list" key.
+void DarwinRegisterFrame(void* FrameBegin) {
+ // Get the key.
+ LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
+ _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
+ assert(LOI && "This should be preallocated by the runtime");
+
+ // Allocate a new LibgccObject to represent this frame. Deallocation of this
+ // object may be impossible: since darwin code in libgcc was written after
+ // the ability to dynamically register frames, things may crash if we
+ // deallocate it.
+ struct LibgccObject* ob = (struct LibgccObject*)
+ malloc(sizeof(struct LibgccObject));
+
+  // Fill in the fields the same way libgcc does.
+ ob->unused1 = (void *)-1;
+ ob->unused2 = 0;
+ ob->unused3 = 0;
+ ob->frame = FrameBegin;
+ ob->encoding.i = 0;
+ ob->encoding.b.encoding = llvm::dwarf::DW_EH_PE_omit;
+
+  // Put the info in both places, as libgcc uses either the first or the
+  // second field.  Note that we rely on having two pointers here: if fde_end
+  // were a char, things would get complicated.
+ ob->fde_end = (char*)LOI->unseenObjects;
+ ob->next = LOI->unseenObjects;
+
+ // Update the key's unseenObjects list.
+ LOI->unseenObjects = ob;
+
+ // Finally update the "key". Apparently, libgcc requires it.
+ _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST,
+ LOI);
+
+}
+
+}
+#endif // USE_KEYMGR
+#endif // __GNUC__
+
+/// createJIT - This is the factory method for creating a JIT for the current
+/// machine; it does not fall back to the interpreter.  This takes ownership
+/// of the module provider.
+ExecutionEngine *ExecutionEngine::createJIT(ModuleProvider *MP,
+ std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel) {
+ ExecutionEngine *EE = JIT::createJIT(MP, ErrorStr, JMM, OptLevel);
+ if (!EE) return 0;
+
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr);
+ return EE;
+}
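+
+// A typical client-side use of this factory, as a sketch (assumes a Module M
+// has already been parsed and defines a function named "main"; the names here
+// are illustrative only):
+//
+//   ModuleProvider *MP = new ExistingModuleProvider(M);
+//   std::string Err;
+//   ExecutionEngine *EE = ExecutionEngine::createJIT(MP, &Err);
+//   if (!EE) { cerr << Err << "\n"; return 1; }
+//   std::vector<GenericValue> Args;   // main() taking no arguments
+//   GenericValue GV = EE->runFunction(M->getFunction("main"), Args);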
+
+JIT::JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji,
+ JITMemoryManager *JMM, CodeGenOpt::Level OptLevel)
+ : ExecutionEngine(MP), TM(tm), TJI(tji) {
+ setTargetData(TM.getTargetData());
+
+ jitstate = new JITState(MP);
+
+ // Initialize JCE
+ JCE = createEmitter(*this, JMM);
+
+ // Add target data
+ MutexGuard locked(lock);
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory that
+ // may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, OptLevel)) {
+ cerr << "Target does not support machine code emission!\n";
+ abort();
+ }
+
+ // Register routine for informing unwinding runtime about new EH frames
+#if defined(__GNUC__) && !defined(__ARM_EABI__)
+#if USE_KEYMGR
+ struct LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
+ _keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
+
+ // The key is created on demand, and libgcc creates it the first time an
+ // exception occurs. Since we need the key to register frames, we create
+ // it now.
+ if (!LOI)
+ LOI = (LibgccObjectInfo*)calloc(sizeof(struct LibgccObjectInfo), 1);
+ _keymgr_set_and_unlock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST, LOI);
+ InstallExceptionTableRegister(DarwinRegisterFrame);
+#else
+ InstallExceptionTableRegister(__register_frame);
+#endif // USE_KEYMGR
+#endif // __GNUC__
+
+ // Initialize passes.
+ PM.doInitialization();
+}
+
+JIT::~JIT() {
+ delete jitstate;
+ delete JCE;
+ delete &TM;
+}
+
+/// addModuleProvider - Add a new ModuleProvider to the JIT.  If we previously
+/// removed the last ModuleProvider, we need to re-initialize jitstate with a
+/// valid ModuleProvider.
+void JIT::addModuleProvider(ModuleProvider *MP) {
+ MutexGuard locked(lock);
+
+ if (Modules.empty()) {
+ assert(!jitstate && "jitstate should be NULL if Modules vector is empty!");
+
+ jitstate = new JITState(MP);
+
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ cerr << "Target does not support machine code emission!\n";
+ abort();
+ }
+
+ // Initialize passes.
+ PM.doInitialization();
+ }
+
+ ExecutionEngine::addModuleProvider(MP);
+}
+
+/// removeModuleProvider - If we are removing the last ModuleProvider,
+/// invalidate the jitstate since the PassManager it contains references a
+/// released ModuleProvider.
+Module *JIT::removeModuleProvider(ModuleProvider *MP, std::string *E) {
+ Module *result = ExecutionEngine::removeModuleProvider(MP, E);
+
+ MutexGuard locked(lock);
+
+ if (jitstate->getMP() == MP) {
+ delete jitstate;
+ jitstate = 0;
+ }
+
+ if (!jitstate && !Modules.empty()) {
+ jitstate = new JITState(Modules[0]);
+
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ cerr << "Target does not support machine code emission!\n";
+ abort();
+ }
+
+ // Initialize passes.
+ PM.doInitialization();
+ }
+ return result;
+}
+
+/// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
+/// and deletes the ModuleProvider and owned Module. Avoids materializing
+/// the underlying module.
+void JIT::deleteModuleProvider(ModuleProvider *MP, std::string *E) {
+ ExecutionEngine::deleteModuleProvider(MP, E);
+
+ MutexGuard locked(lock);
+
+ if (jitstate->getMP() == MP) {
+ delete jitstate;
+ jitstate = 0;
+ }
+
+ if (!jitstate && !Modules.empty()) {
+ jitstate = new JITState(Modules[0]);
+
+ FunctionPassManager &PM = jitstate->getPM(locked);
+ PM.add(new TargetData(*TM.getTargetData()));
+
+ // Turn the machine code intermediate representation into bytes in memory
+ // that may be executed.
+ if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ cerr << "Target does not support machine code emission!\n";
+ abort();
+ }
+
+ // Initialize passes.
+ PM.doInitialization();
+ }
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+GenericValue JIT::runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues) {
+ assert(F && "Function *F was null at entry to run()");
+
+ void *FPtr = getPointerToFunction(F);
+ assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+ const FunctionType *FTy = F->getFunctionType();
+ const Type *RetTy = FTy->getReturnType();
+
+ assert((FTy->getNumParams() == ArgValues.size() ||
+ (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+ "Wrong number of arguments passed into function!");
+ assert(FTy->getNumParams() == ArgValues.size() &&
+ "This doesn't support passing arguments through varargs (yet)!");
+
+ // Handle some common cases first. These cases correspond to common `main'
+ // prototypes.
+ if (RetTy == Type::Int32Ty || RetTy == Type::VoidTy) {
+ switch (ArgValues.size()) {
+ case 3:
+ if (FTy->getParamType(0) == Type::Int32Ty &&
+ isa<PointerType>(FTy->getParamType(1)) &&
+ isa<PointerType>(FTy->getParamType(2))) {
+ int (*PF)(int, char **, const char **) =
+ (int(*)(int, char **, const char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1]),
+ (const char **)GVTOP(ArgValues[2])));
+ return rv;
+ }
+ break;
+ case 2:
+ if (FTy->getParamType(0) == Type::Int32Ty &&
+ isa<PointerType>(FTy->getParamType(1))) {
+ int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;
+
+ // Call the function.
+ GenericValue rv;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+ (char **)GVTOP(ArgValues[1])));
+ return rv;
+ }
+ break;
+ case 1:
+ if (FTy->getNumParams() == 1 &&
+ FTy->getParamType(0) == Type::Int32Ty) {
+ GenericValue rv;
+ int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
+ rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+ return rv;
+ }
+ break;
+ }
+ }
+
+ // Handle cases where no arguments are passed first.
+ if (ArgValues.empty()) {
+ GenericValue rv;
+ switch (RetTy->getTypeID()) {
+ default: assert(0 && "Unknown return type for function call!");
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+ if (BitWidth == 1)
+ rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 8)
+ rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 16)
+ rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 32)
+ rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
+ else if (BitWidth <= 64)
+ rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
+ else
+ assert(0 && "Integer types > 64 bits not supported");
+ return rv;
+ }
+ case Type::VoidTyID:
+ rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+ return rv;
+ case Type::FloatTyID:
+ rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::DoubleTyID:
+ rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
+ return rv;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ assert(0 && "long double not supported yet");
+ return rv;
+ case Type::PointerTyID:
+ return PTOGV(((void*(*)())(intptr_t)FPtr)());
+ }
+ }
+
+ // Okay, this is not one of our quick and easy cases. Because we don't have a
+ // full FFI, we have to codegen a nullary stub function that just calls the
+ // function we are interested in, passing in constants for all of the
+ // arguments. Make this function and return.
+
+ // First, create the function.
+ FunctionType *STy=FunctionType::get(RetTy, std::vector<const Type*>(), false);
+ Function *Stub = Function::Create(STy, Function::InternalLinkage, "",
+ F->getParent());
+
+ // Insert a basic block.
+ BasicBlock *StubBB = BasicBlock::Create("", Stub);
+
+ // Convert all of the GenericValue arguments over to constants. Note that we
+ // currently don't support varargs.
+ SmallVector<Value*, 8> Args;
+ for (unsigned i = 0, e = ArgValues.size(); i != e; ++i) {
+ Constant *C = 0;
+ const Type *ArgTy = FTy->getParamType(i);
+ const GenericValue &AV = ArgValues[i];
+ switch (ArgTy->getTypeID()) {
+ default: assert(0 && "Unknown argument type for function call!");
+ case Type::IntegerTyID:
+ C = ConstantInt::get(AV.IntVal);
+ break;
+ case Type::FloatTyID:
+ C = ConstantFP::get(APFloat(AV.FloatVal));
+ break;
+ case Type::DoubleTyID:
+ C = ConstantFP::get(APFloat(AV.DoubleVal));
+ break;
+ case Type::PPC_FP128TyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ C = ConstantFP::get(APFloat(AV.IntVal));
+ break;
+ case Type::PointerTyID:
+ void *ArgPtr = GVTOP(AV);
+ if (sizeof(void*) == 4)
+ C = ConstantInt::get(Type::Int32Ty, (int)(intptr_t)ArgPtr);
+ else
+ C = ConstantInt::get(Type::Int64Ty, (intptr_t)ArgPtr);
+ C = ConstantExpr::getIntToPtr(C, ArgTy); // Cast the integer to pointer
+ break;
+ }
+ Args.push_back(C);
+ }
+
+ CallInst *TheCall = CallInst::Create(F, Args.begin(), Args.end(),
+ "", StubBB);
+ TheCall->setCallingConv(F->getCallingConv());
+ TheCall->setTailCall();
+ if (TheCall->getType() != Type::VoidTy)
+ ReturnInst::Create(TheCall, StubBB); // Return result of the call.
+ else
+ ReturnInst::Create(StubBB); // Just return void.
+
+ // Finally, return the value returned by our nullary stub function.
+ return runFunction(Stub, std::vector<GenericValue>());
+}
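+
+// For a call that misses the fast paths above, the nullary stub built here
+// is morally equivalent to the following IR (a sketch, for a hypothetical
+// double f(double) called with 3.14):
+//
+//   define internal double @""() {
+//     %r = tail call double @f(double 3.140000e+00)
+//     ret double %r
+//   }
+//
+// The result is obtained by recursively running the stub, which does hit a
+// fast path (no arguments, double return).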
+
+/// runJITOnFunction - Run the FunctionPassManager full of
+/// just-in-time compilation passes on F, hopefully filling in
+/// GlobalAddress[F] with the address of F's machine code.
+///
+void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
+ MutexGuard locked(lock);
+
+ registerMachineCodeInfo(MCI);
+
+ runJITOnFunctionUnlocked(F, locked);
+
+ registerMachineCodeInfo(0);
+}
+
+void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
+ static bool isAlreadyCodeGenerating = false;
+ assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
+
+ // JIT the function
+ isAlreadyCodeGenerating = true;
+ jitstate->getPM(locked).run(*F);
+ isAlreadyCodeGenerating = false;
+
+ // If the function referred to another function that had not yet been
+ // read from bitcode, but we are jitting non-lazily, emit it now.
+ while (!jitstate->getPendingFunctions(locked).empty()) {
+ Function *PF = jitstate->getPendingFunctions(locked).back();
+ jitstate->getPendingFunctions(locked).pop_back();
+
+ // JIT the function
+ isAlreadyCodeGenerating = true;
+ jitstate->getPM(locked).run(*PF);
+ isAlreadyCodeGenerating = false;
+
+ // Now that the function has been jitted, ask the JITEmitter to rewrite
+ // the stub with real address of the function.
+ updateFunctionStub(PF);
+ }
+
+ // If the JIT is configured to emit info so that dlsym can be used to
+ // rewrite stubs to external globals, do so now.
+ if (areDlsymStubsEnabled() && isLazyCompilationDisabled())
+ updateDlsymStubTable();
+}
+
+/// getPointerToFunction - This method is used to get the address of the
+/// specified function, compiling it if necessary.
+///
+void *JIT::getPointerToFunction(Function *F) {
+
+ if (void *Addr = getPointerToGlobalIfAvailable(F))
+ return Addr; // Check if function already code gen'd
+
+ MutexGuard locked(lock);
+
+ // Make sure we read in the function if it exists in this Module.
+ if (F->hasNotBeenReadFromBitcode()) {
+ // Determine the module provider this function is provided by.
+ Module *M = F->getParent();
+ ModuleProvider *MP = 0;
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+ if (Modules[i]->getModule() == M) {
+ MP = Modules[i];
+ break;
+ }
+ }
+ assert(MP && "Function isn't in a module we know about!");
+
+ std::string ErrorMsg;
+ if (MP->materializeFunction(F, &ErrorMsg)) {
+ cerr << "Error reading function '" << F->getName()
+ << "' from bitcode file: " << ErrorMsg << "\n";
+ abort();
+ }
+
+ // Now retry to get the address.
+ if (void *Addr = getPointerToGlobalIfAvailable(F))
+ return Addr;
+ }
+
+ if (F->isDeclaration()) {
+ bool AbortOnFailure =
+ !areDlsymStubsEnabled() && !F->hasExternalWeakLinkage();
+ void *Addr = getPointerToNamedFunction(F->getName(), AbortOnFailure);
+ addGlobalMapping(F, Addr);
+ return Addr;
+ }
+
+ runJITOnFunctionUnlocked(F, locked);
+
+ void *Addr = getPointerToGlobalIfAvailable(F);
+ assert(Addr && "Code generation didn't add function to GlobalAddress table!");
+ return Addr;
+}
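+
+// Typical use, as a sketch (F is assumed to have type int(int), and TheJIT is
+// an illustrative name): once compiled, the returned address can be called
+// through an appropriately typed pointer.
+//
+//   int (*FP)(int) = (int(*)(int))(intptr_t)TheJIT->getPointerToFunction(F);
+//   int Result = FP(42);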
+
+/// getOrEmitGlobalVariable - Return the address of the specified global
+/// variable, possibly emitting it to memory if needed. This is used by the
+/// Emitter.
+void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
+ MutexGuard locked(lock);
+
+ void *Ptr = getPointerToGlobalIfAvailable(GV);
+ if (Ptr) return Ptr;
+
+ // If the global is external, just remember the address.
+ if (GV->isDeclaration()) {
+#if HAVE___DSO_HANDLE
+ if (GV->getName() == "__dso_handle")
+ return (void*)&__dso_handle;
+#endif
+ Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(GV->getName().c_str());
+ if (Ptr == 0 && !areDlsymStubsEnabled()) {
+ cerr << "Could not resolve external global address: "
+ << GV->getName() << "\n";
+ abort();
+ }
+ addGlobalMapping(GV, Ptr);
+ } else {
+    // GlobalVariables which are not "constant" will cause trouble in a server
+    // situation.  They are returned in the same block of memory as code,
+    // which may not be writable.
+ if (isGVCompilationDisabled() && !GV->isConstant()) {
+ cerr << "Compilation of non-internal GlobalValue is disabled!\n";
+ abort();
+ }
+ // If the global hasn't been emitted to memory yet, allocate space and
+ // emit it into memory. It goes in the same array as the generated
+ // code, jump tables, etc.
+ const Type *GlobalType = GV->getType()->getElementType();
+ size_t S = getTargetData()->getTypeAllocSize(GlobalType);
+ size_t A = getTargetData()->getPreferredAlignment(GV);
+ if (GV->isThreadLocal()) {
+ MutexGuard locked(lock);
+ Ptr = TJI.allocateThreadLocalMemory(S);
+ } else if (TJI.allocateSeparateGVMemory()) {
+ if (A <= 8) {
+ Ptr = malloc(S);
+ } else {
+ // Allocate S+A bytes of memory, then use an aligned pointer within that
+ // space.
+ Ptr = malloc(S+A);
+ unsigned MisAligned = ((intptr_t)Ptr & (A-1));
+ Ptr = (char*)Ptr + (MisAligned ? (A-MisAligned) : 0);
+ }
+ } else {
+ Ptr = JCE->allocateSpace(S, A);
+ }
+ addGlobalMapping(GV, Ptr);
+ EmitGlobalVariable(GV);
+ }
+ return Ptr;
+}
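+
+// Worked example of the alignment fix-up above (a sketch): with S = 8,
+// A = 16, and malloc returning 0x1008, MisAligned = 0x1008 & 15 = 8, so the
+// pointer handed out is 0x1008 + (16 - 8) = 0x1010, which is 16-byte aligned
+// and still leaves S bytes within the S+A-byte allocation.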
+
+/// recompileAndRelinkFunction - This method is used to force a function
+/// which has already been compiled, to be compiled again, possibly
+/// after it has been modified. Then the entry to the old copy is overwritten
+/// with a branch to the new copy. If there was no old copy, this acts
+/// just like JIT::getPointerToFunction().
+///
+void *JIT::recompileAndRelinkFunction(Function *F) {
+ void *OldAddr = getPointerToGlobalIfAvailable(F);
+
+ // If it's not already compiled there is no reason to patch it up.
+ if (OldAddr == 0) { return getPointerToFunction(F); }
+
+ // Delete the old function mapping.
+ addGlobalMapping(F, 0);
+
+ // Recodegen the function
+ runJITOnFunction(F);
+
+ // Update state, forward the old function to the new function.
+ void *Addr = getPointerToGlobalIfAvailable(F);
+ assert(Addr && "Code generation didn't add function to GlobalAddress table!");
+ TJI.replaceMachineCodeForFunction(OldAddr, Addr);
+ return Addr;
+}
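+
+// Usage sketch (hypothetical): after mutating F's IR in place, callers that
+// already hold a pointer to the old code keep working, because the old entry
+// point is patched to branch to the new code.
+//
+//   void *New = TheJIT->recompileAndRelinkFunction(F);
+//   // Old call sites still jump to OldAddr, which now forwards to New.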
+
+/// getMemoryForGV - This method abstracts memory allocation of global
+/// variable so that the JIT can allocate thread local variables depending
+/// on the target.
+///
+char* JIT::getMemoryForGV(const GlobalVariable* GV) {
+ const Type *ElTy = GV->getType()->getElementType();
+ size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
+ if (GV->isThreadLocal()) {
+ MutexGuard locked(lock);
+ return TJI.allocateThreadLocalMemory(GVSize);
+ } else {
+ return new char[GVSize];
+ }
+}
+
+void JIT::addPendingFunction(Function *F) {
+ MutexGuard locked(lock);
+ jitstate->getPendingFunctions(locked).push_back(F);
+}
diff --git a/lib/ExecutionEngine/JIT/JIT.h b/lib/ExecutionEngine/JIT/JIT.h
new file mode 100644
index 0000000..3ccb2dd
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JIT.h
@@ -0,0 +1,176 @@
+//===-- JIT.h - Class definition for the JIT --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the top-level JIT data structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef JIT_H
+#define JIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/PassManager.h"
+
+namespace llvm {
+
+class Function;
+class TargetMachine;
+class TargetJITInfo;
+class MachineCodeEmitter;
+class MachineCodeInfo;
+
+class JITState {
+private:
+ FunctionPassManager PM; // Passes to compile a function
+ ModuleProvider *MP; // ModuleProvider used to create the PM
+
+ /// PendingFunctions - Functions which have not been code generated yet, but
+ /// were called from a function being code generated.
+ std::vector<Function*> PendingFunctions;
+
+public:
+ explicit JITState(ModuleProvider *MP) : PM(MP), MP(MP) {}
+
+ FunctionPassManager &getPM(const MutexGuard &L) {
+ return PM;
+ }
+
+ ModuleProvider *getMP() const { return MP; }
+ std::vector<Function*> &getPendingFunctions(const MutexGuard &L) {
+ return PendingFunctions;
+ }
+};
+
+
+class JIT : public ExecutionEngine {
+ TargetMachine &TM; // The current target we are compiling to
+ TargetJITInfo &TJI; // The JITInfo for the target we are compiling to
+ JITCodeEmitter *JCE; // JCE object
+
+ JITState *jitstate;
+
+ JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji,
+ JITMemoryManager *JMM, CodeGenOpt::Level OptLevel);
+public:
+ ~JIT();
+
+ static void Register() {
+ JITCtor = create;
+ }
+
+ /// getJITInfo - Return the target JIT information structure.
+ ///
+ TargetJITInfo &getJITInfo() const { return TJI; }
+
+  /// create - Create and return a new JIT compiler if there is one available
+ /// for the current target. Otherwise, return null.
+ ///
+ static ExecutionEngine *create(ModuleProvider *MP, std::string *Err,
+ CodeGenOpt::Level OptLevel =
+ CodeGenOpt::Default) {
+ return createJIT(MP, Err, 0, OptLevel);
+ }
+
+ virtual void addModuleProvider(ModuleProvider *MP);
+
+ /// removeModuleProvider - Remove a ModuleProvider from the list of modules.
+  /// Releases the Module from the ModuleProvider, materializing it in the
+ /// process, and returns the materialized Module.
+ virtual Module *removeModuleProvider(ModuleProvider *MP,
+ std::string *ErrInfo = 0);
+
+ /// deleteModuleProvider - Remove a ModuleProvider from the list of modules,
+ /// and deletes the ModuleProvider and owned Module. Avoids materializing
+ /// the underlying module.
+ virtual void deleteModuleProvider(ModuleProvider *P,std::string *ErrInfo = 0);
+
+ /// runFunction - Start execution with the specified function and arguments.
+ ///
+ virtual GenericValue runFunction(Function *F,
+ const std::vector<GenericValue> &ArgValues);
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+
+ // CompilationCallback - Invoked the first time that a call site is found,
+ // which causes lazy compilation of the target function.
+ //
+ static void CompilationCallback();
+
+ /// getPointerToFunction - This returns the address of the specified function,
+ /// compiling it if necessary.
+ ///
+ void *getPointerToFunction(Function *F);
+
+ /// getOrEmitGlobalVariable - Return the address of the specified global
+ /// variable, possibly emitting it to memory if needed. This is used by the
+ /// Emitter.
+ void *getOrEmitGlobalVariable(const GlobalVariable *GV);
+
+ /// getPointerToFunctionOrStub - If the specified function has been
+ /// code-gen'd, return a pointer to the function. If not, compile it, or use
+ /// a stub to implement lazy compilation if available.
+ ///
+ void *getPointerToFunctionOrStub(Function *F);
+
+ /// recompileAndRelinkFunction - This method is used to force a function
+ /// which has already been compiled, to be compiled again, possibly
+ /// after it has been modified. Then the entry to the old copy is overwritten
+ /// with a branch to the new copy. If there was no old copy, this acts
+ /// just like JIT::getPointerToFunction().
+ ///
+ void *recompileAndRelinkFunction(Function *F);
+
+ /// freeMachineCodeForFunction - deallocate memory used to code-generate this
+ /// Function.
+ ///
+ void freeMachineCodeForFunction(Function *F);
+
+ /// addPendingFunction - while jitting non-lazily, a called but non-codegen'd
+ /// function was encountered. Add it to a pending list to be processed after
+ /// the current function.
+ ///
+ void addPendingFunction(Function *F);
+
+ /// getCodeEmitter - Return the code emitter this JIT is emitting into.
+ JITCodeEmitter *getCodeEmitter() const { return JCE; }
+
+ static ExecutionEngine *createJIT(ModuleProvider *MP, std::string *Err,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel);
+
+
+ // Run the JIT on F and return information about the generated code
+ void runJITOnFunction(Function *F, MachineCodeInfo *MCI = 0);
+
+private:
+ static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM);
+ void registerMachineCodeInfo(MachineCodeInfo *MCI);
+ void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
+ void updateFunctionStub(Function *F);
+ void updateDlsymStubTable();
+
+protected:
+
+  /// getMemoryForGV - Allocate memory for a global variable.
+ virtual char* getMemoryForGV(const GlobalVariable* GV);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
new file mode 100644
index 0000000..e101ef3
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -0,0 +1,1056 @@
+//===----- JITDwarfEmitter.cpp - Write dwarf tables into memory -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITDwarfEmitter object that is used by the JIT to
+// write dwarf tables to memory.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "JITDwarfEmitter.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+JITDwarfEmitter::JITDwarfEmitter(JIT& theJit) : Jit(theJit) {}
+
+
+unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
+ JITCodeEmitter& jce,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) {
+ const TargetMachine& TM = F.getTarget();
+ TD = TM.getTargetData();
+ needsIndirectEncoding = TM.getTargetAsmInfo()->getNeedsIndirectEncoding();
+ stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
+ RI = TM.getRegisterInfo();
+ JCE = &jce;
+
+ unsigned char* ExceptionTable = EmitExceptionTable(&F, StartFunction,
+ EndFunction);
+
+ unsigned char* Result = 0;
+ unsigned char* EHFramePtr = 0;
+
+ const std::vector<Function *> Personalities = MMI->getPersonalities();
+ EHFramePtr = EmitCommonEHFrame(Personalities[MMI->getPersonalityIndex()]);
+
+ Result = EmitEHFrame(Personalities[MMI->getPersonalityIndex()], EHFramePtr,
+ StartFunction, EndFunction, ExceptionTable);
+
+ return Result;
+}
+
+
+void
+JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const {
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+ bool IsLocal = false;
+ unsigned BaseLabelID = 0;
+
+ for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+ const MachineMove &Move = Moves[i];
+ unsigned LabelID = Move.getLabelID();
+
+ if (LabelID) {
+ LabelID = MMI->MappedLabel(LabelID);
+
+ // Throw out move if the label is invalid.
+ if (!LabelID) continue;
+ }
+
+ intptr_t LabelPtr = 0;
+ if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+
+ const MachineLocation &Dst = Move.getDestination();
+ const MachineLocation &Src = Move.getSource();
+
+ // Advance row if new location.
+ if (BaseLabelPtr && LabelID && (BaseLabelID != LabelID || !IsLocal)) {
+ JCE->emitByte(dwarf::DW_CFA_advance_loc4);
+ JCE->emitInt32(LabelPtr - BaseLabelPtr);
+
+ BaseLabelID = LabelID;
+ BaseLabelPtr = LabelPtr;
+ IsLocal = true;
+ }
+
+ // If advancing cfa.
+ if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+ if (!Src.isReg()) {
+ if (Src.getReg() == MachineLocation::VirtualFP) {
+ JCE->emitByte(dwarf::DW_CFA_def_cfa_offset);
+ } else {
+ JCE->emitByte(dwarf::DW_CFA_def_cfa);
+ JCE->emitULEB128Bytes(RI->getDwarfRegNum(Src.getReg(), true));
+ }
+
+ int Offset = -Src.getOffset();
+
+ JCE->emitULEB128Bytes(Offset);
+ } else {
+ assert(0 && "Machine move no supported yet.");
+ }
+ } else if (Src.isReg() &&
+ Src.getReg() == MachineLocation::VirtualFP) {
+ if (Dst.isReg()) {
+ JCE->emitByte(dwarf::DW_CFA_def_cfa_register);
+ JCE->emitULEB128Bytes(RI->getDwarfRegNum(Dst.getReg(), true));
+ } else {
+ assert(0 && "Machine move no supported yet.");
+ }
+ } else {
+ unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
+ int Offset = Dst.getOffset() / stackGrowth;
+
+ if (Offset < 0) {
+ JCE->emitByte(dwarf::DW_CFA_offset_extended_sf);
+ JCE->emitULEB128Bytes(Reg);
+ JCE->emitSLEB128Bytes(Offset);
+ } else if (Reg < 64) {
+ JCE->emitByte(dwarf::DW_CFA_offset + Reg);
+ JCE->emitULEB128Bytes(Offset);
+ } else {
+ JCE->emitByte(dwarf::DW_CFA_offset_extended);
+ JCE->emitULEB128Bytes(Reg);
+ JCE->emitULEB128Bytes(Offset);
+ }
+ }
+ }
+}
+
+/// SharedTypeIds - How many leading type ids two landing pads have in common.
+static unsigned SharedTypeIds(const LandingPadInfo *L,
+ const LandingPadInfo *R) {
+ const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
+ unsigned LSize = LIds.size(), RSize = RIds.size();
+ unsigned MinSize = LSize < RSize ? LSize : RSize;
+ unsigned Count = 0;
+
+ for (; Count != MinSize; ++Count)
+ if (LIds[Count] != RIds[Count])
+ return Count;
+
+ return Count;
+}
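+
+// Worked example (a sketch): for L->TypeIds = {1, 2, 3} and
+// R->TypeIds = {1, 2, 7}, the shared leading prefix is {1, 2}, so
+// SharedTypeIds(L, R) == 2.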
+
+
+/// PadLT - Order landing pads lexicographically by type id.
+static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
+ const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
+ unsigned LSize = LIds.size(), RSize = RIds.size();
+ unsigned MinSize = LSize < RSize ? LSize : RSize;
+
+ for (unsigned i = 0; i != MinSize; ++i)
+ if (LIds[i] != RIds[i])
+ return LIds[i] < RIds[i];
+
+ return LSize < RSize;
+}
+
+namespace {
+
+struct KeyInfo {
+ static inline unsigned getEmptyKey() { return -1U; }
+ static inline unsigned getTombstoneKey() { return -2U; }
+ static unsigned getHashValue(const unsigned &Key) { return Key; }
+ static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
+ static bool isPod() { return true; }
+};
+
+/// ActionEntry - Structure describing an entry in the actions table.
+struct ActionEntry {
+ int ValueForTypeID; // The value to write - may not be equal to the type id.
+ int NextAction;
+ struct ActionEntry *Previous;
+};
+
+/// PadRange - Structure holding a try-range and the associated landing pad.
+struct PadRange {
+ // The index of the landing pad.
+ unsigned PadIndex;
+ // The index of the begin and end labels in the landing pad's label lists.
+ unsigned RangeIndex;
+};
+
+typedef DenseMap<unsigned, PadRange, KeyInfo> RangeMapType;
+
+/// CallSiteEntry - Structure describing an entry in the call-site table.
+struct CallSiteEntry {
+ unsigned BeginLabel; // zero indicates the start of the function.
+ unsigned EndLabel; // zero indicates the end of the function.
+ unsigned PadLabel; // zero indicates that there is no landing pad.
+ unsigned Action;
+};
+
+}
+
+unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) const {
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
+ const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
+ const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ if (PadInfos.empty()) return 0;
+
+ // Sort the landing pads in order of their type ids. This is used to fold
+ // duplicate actions.
+ SmallVector<const LandingPadInfo *, 64> LandingPads;
+ LandingPads.reserve(PadInfos.size());
+ for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
+ LandingPads.push_back(&PadInfos[i]);
+ std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
+
+ // Negative type ids index into FilterIds, positive type ids index into
+ // TypeInfos. The value written for a positive type id is just the type
+ // id itself. For a negative type id, however, the value written is the
+ // (negative) byte offset of the corresponding FilterIds entry. The byte
+ // offset is usually equal to the type id, because the FilterIds entries
+ // are written using a variable width encoding which outputs one byte per
+ // entry as long as the value written is not too large, but can differ.
+ // This kind of complication does not occur for positive type ids because
+ // type infos are output using a fixed width encoding.
+ // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
+ SmallVector<int, 16> FilterOffsets;
+ FilterOffsets.reserve(FilterIds.size());
+ int Offset = -1;
+ for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
+ E = FilterIds.end(); I != E; ++I) {
+ FilterOffsets.push_back(Offset);
+ Offset -= TargetAsmInfo::getULEB128Size(*I);
+ }
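+
+  // Worked example of the offsets above (a sketch): for FilterIds = {5, 300},
+  // getULEB128Size(5) == 1 and getULEB128Size(300) == 2, so FilterOffsets
+  // becomes {-1, -2}; a third entry would then get offset -4 rather than -3,
+  // which is the "can differ" case mentioned in the comment above.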
+
+ // Compute the actions table and gather the first action index for each
+ // landing pad site.
+ SmallVector<ActionEntry, 32> Actions;
+ SmallVector<unsigned, 64> FirstActions;
+ FirstActions.reserve(LandingPads.size());
+
+ int FirstAction = 0;
+ unsigned SizeActions = 0;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LP = LandingPads[i];
+ const std::vector<int> &TypeIds = LP->TypeIds;
+ const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
+ unsigned SizeSiteActions = 0;
+
+ if (NumShared < TypeIds.size()) {
+ unsigned SizeAction = 0;
+ ActionEntry *PrevAction = 0;
+
+ if (NumShared) {
+ const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
+ assert(Actions.size());
+ PrevAction = &Actions.back();
+ SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
+ TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ for (unsigned j = NumShared; j != SizePrevIds; ++j) {
+ SizeAction -= TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ SizeAction += -PrevAction->NextAction;
+ PrevAction = PrevAction->Previous;
+ }
+ }
+
+ // Compute the actions.
+ for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
+ int TypeID = TypeIds[I];
+ assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
+ int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
+ unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);
+
+ int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
+ SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
+ SizeSiteActions += SizeAction;
+
+ ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
+ Actions.push_back(Action);
+
+ PrevAction = &Actions.back();
+ }
+
+ // Record the first action of the landing pad site.
+ FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
+ } // else identical - re-use previous FirstAction
+
+ FirstActions.push_back(FirstAction);
+
+    // Compute this site's contribution to size.
+ SizeActions += SizeSiteActions;
+ }
+
+ // Compute the call-site table. Entries must be ordered by address.
+ SmallVector<CallSiteEntry, 64> CallSites;
+
+ RangeMapType PadMap;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LandingPad = LandingPads[i];
+ for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
+ unsigned BeginLabel = LandingPad->BeginLabels[j];
+ assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
+ PadRange P = { i, j };
+ PadMap[BeginLabel] = P;
+ }
+ }
+
+ bool MayThrow = false;
+ unsigned LastLabel = 0;
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+ I != E; ++I) {
+ for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
+ MI != E; ++MI) {
+ if (!MI->isLabel()) {
+ MayThrow |= MI->getDesc().isCall();
+ continue;
+ }
+
+ unsigned BeginLabel = MI->getOperand(0).getImm();
+ assert(BeginLabel && "Invalid label!");
+
+ if (BeginLabel == LastLabel)
+ MayThrow = false;
+
+ RangeMapType::iterator L = PadMap.find(BeginLabel);
+
+ if (L == PadMap.end())
+ continue;
+
+ PadRange P = L->second;
+ const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
+
+ assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
+ "Inconsistent landing pad map!");
+
+ // If some instruction between the previous try-range and this one may
+ // throw, create a call-site entry with no landing pad for the region
+ // between the try-ranges.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ LastLabel = LandingPad->EndLabels[P.RangeIndex];
+ CallSiteEntry Site = {BeginLabel, LastLabel,
+ LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
+
+ assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
+ "Invalid landing pad!");
+
+ // Try to merge with the previous call-site.
+ if (CallSites.size()) {
+ CallSiteEntry &Prev = CallSites.back();
+ if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
+ // Extend the range of the previous entry.
+ Prev.EndLabel = Site.EndLabel;
+ continue;
+ }
+ }
+
+ // Otherwise, create a new call-site.
+ CallSites.push_back(Site);
+ }
+ }
+ // If some instruction between the previous try-range and the end of the
+ // function may throw, create a call-site entry with no landing pad for the
+ // region following the try-range.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, 0, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ // Final tallies.
+ unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
+ sizeof(int32_t) + // Site length.
+ sizeof(int32_t)); // Landing pad.
+ for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
+ SizeSites += TargetAsmInfo::getULEB128Size(CallSites[i].Action);
+
+ unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
+
+ unsigned TypeOffset = sizeof(int8_t) + // Call site format
+ // Call-site table length
+ TargetAsmInfo::getULEB128Size(SizeSites) +
+ SizeSites + SizeActions + SizeTypes;
+
+ unsigned TotalSize = sizeof(int8_t) + // LPStart format
+ sizeof(int8_t) + // TType format
+ TargetAsmInfo::getULEB128Size(TypeOffset) + // TType base offset
+ TypeOffset;
+
+ unsigned SizeAlign = (4 - TotalSize) & 3;
+
+ // Begin the exception table.
+ JCE->emitAlignment(4);
+ for (unsigned i = 0; i != SizeAlign; ++i) {
+ JCE->emitByte(0);
+ // Asm->EOL("Padding");
+ }
+
+ unsigned char* DwarfExceptionTable = (unsigned char*)JCE->getCurrentPCValue();
+
+ // Emit the header.
+ JCE->emitByte(dwarf::DW_EH_PE_omit);
+ // Asm->EOL("LPStart format (DW_EH_PE_omit)");
+ JCE->emitByte(dwarf::DW_EH_PE_absptr);
+ // Asm->EOL("TType format (DW_EH_PE_absptr)");
+ JCE->emitULEB128Bytes(TypeOffset);
+ // Asm->EOL("TType base offset");
+ JCE->emitByte(dwarf::DW_EH_PE_udata4);
+ // Asm->EOL("Call site format (DW_EH_PE_udata4)");
+ JCE->emitULEB128Bytes(SizeSites);
+ // Asm->EOL("Call-site table length");
+
+ // Emit the landing pad site information.
+ for (unsigned i = 0; i < CallSites.size(); ++i) {
+ CallSiteEntry &S = CallSites[i];
+ intptr_t BeginLabelPtr = 0;
+ intptr_t EndLabelPtr = 0;
+
+ if (!S.BeginLabel) {
+ BeginLabelPtr = (intptr_t)StartFunction;
+ JCE->emitInt32(0);
+ } else {
+ BeginLabelPtr = JCE->getLabelAddress(S.BeginLabel);
+ JCE->emitInt32(BeginLabelPtr - (intptr_t)StartFunction);
+ }
+
+ // Asm->EOL("Region start");
+
+ if (!S.EndLabel) {
+ EndLabelPtr = (intptr_t)EndFunction;
+ JCE->emitInt32((intptr_t)EndFunction - BeginLabelPtr);
+ } else {
+ EndLabelPtr = JCE->getLabelAddress(S.EndLabel);
+ JCE->emitInt32(EndLabelPtr - BeginLabelPtr);
+ }
+ //Asm->EOL("Region length");
+
+ if (!S.PadLabel) {
+ JCE->emitInt32(0);
+ } else {
+ unsigned PadLabelPtr = JCE->getLabelAddress(S.PadLabel);
+ JCE->emitInt32(PadLabelPtr - (intptr_t)StartFunction);
+ }
+ // Asm->EOL("Landing pad");
+
+ JCE->emitULEB128Bytes(S.Action);
+ // Asm->EOL("Action");
+ }
+
+ // Emit the actions.
+ for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
+ ActionEntry &Action = Actions[I];
+
+ JCE->emitSLEB128Bytes(Action.ValueForTypeID);
+ //Asm->EOL("TypeInfo index");
+ JCE->emitSLEB128Bytes(Action.NextAction);
+ //Asm->EOL("Next action");
+ }
+
+ // Emit the type ids.
+ for (unsigned M = TypeInfos.size(); M; --M) {
+ GlobalVariable *GV = TypeInfos[M - 1];
+
+ if (GV) {
+ if (TD->getPointerSize() == sizeof(int32_t)) {
+ JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
+ } else {
+ JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
+ }
+ } else {
+ if (TD->getPointerSize() == sizeof(int32_t))
+ JCE->emitInt32(0);
+ else
+ JCE->emitInt64(0);
+ }
+ // Asm->EOL("TypeInfo");
+ }
+
+ // Emit the filter typeids.
+ for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
+ unsigned TypeID = FilterIds[j];
+ JCE->emitULEB128Bytes(TypeID);
+ //Asm->EOL("Filter TypeInfo index");
+ }
+
+ JCE->emitAlignment(4);
+
+ return DwarfExceptionTable;
+}
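+
+// The table emitted above follows the usual gcc_except_table (LSDA) shape,
+// summarized as a sketch of the emission order in this function:
+//
+//   header: LPStart format, TType format, TType base offset,
+//           call-site format, call-site table length
+//   call-site table | action table | type-info table | filter-id table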
+
+unsigned char*
+JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+
+ unsigned char* StartCommonPtr = (unsigned char*)JCE->getCurrentPCValue();
+ // EH Common Frame header
+ JCE->allocateSpace(4, 0);
+ unsigned char* FrameCommonBeginPtr = (unsigned char*)JCE->getCurrentPCValue();
+ JCE->emitInt32((int)0);
+ JCE->emitByte(dwarf::DW_CIE_VERSION);
+ JCE->emitString(Personality ? "zPLR" : "zR");
+ JCE->emitULEB128Bytes(1);
+ JCE->emitSLEB128Bytes(stackGrowth);
+ JCE->emitByte(RI->getDwarfRegNum(RI->getRARegister(), true));
+
+ if (Personality) {
+    // Augmentation Size: 3 small ULEBs of one byte each, plus the personality
+    // function, whose size is PointerSize.
+ JCE->emitULEB128Bytes(3 + PointerSize);
+
+    // We give the personality a direct (absolute) encoding because we emit
+    // the function pointer itself.  The encoding is not pc-relative because
+    // the current PC value may be bigger than the personality function
+    // pointer.
+ if (PointerSize == 4) {
+ JCE->emitByte(dwarf::DW_EH_PE_sdata4);
+ JCE->emitInt32(((intptr_t)Jit.getPointerToGlobal(Personality)));
+ } else {
+ JCE->emitByte(dwarf::DW_EH_PE_sdata8);
+ JCE->emitInt64(((intptr_t)Jit.getPointerToGlobal(Personality)));
+ }
+
+ JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+ JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+
+ } else {
+ JCE->emitULEB128Bytes(1);
+ JCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+ }
+
+ std::vector<MachineMove> Moves;
+ RI->getInitialFrameState(Moves);
+ EmitFrameMoves(0, Moves);
+ JCE->emitAlignment(PointerSize);
+
+ JCE->emitInt32At((uintptr_t*)StartCommonPtr,
+ (uintptr_t)((unsigned char*)JCE->getCurrentPCValue() -
+ FrameCommonBeginPtr));
+
+ return StartCommonPtr;
+}
+
+
+unsigned char*
+JITDwarfEmitter::EmitEHFrame(const Function* Personality,
+ unsigned char* StartCommonPtr,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction,
+ unsigned char* ExceptionTable) const {
+ unsigned PointerSize = TD->getPointerSize();
+
+ // EH frame header.
+ unsigned char* StartEHPtr = (unsigned char*)JCE->getCurrentPCValue();
+ JCE->allocateSpace(4, 0);
+ unsigned char* FrameBeginPtr = (unsigned char*)JCE->getCurrentPCValue();
+ // FDE CIE Offset
+ JCE->emitInt32(FrameBeginPtr - StartCommonPtr);
+ JCE->emitInt32(StartFunction - (unsigned char*)JCE->getCurrentPCValue());
+ JCE->emitInt32(EndFunction - StartFunction);
+
+ // If there is a personality and landing pads then point to the language
+ // specific data area in the exception table.
+ if (MMI->getPersonalityIndex()) {
+ JCE->emitULEB128Bytes(4);
+
+ if (!MMI->getLandingPads().empty()) {
+ JCE->emitInt32(ExceptionTable - (unsigned char*)JCE->getCurrentPCValue());
+ } else {
+ JCE->emitInt32((int)0);
+ }
+ } else {
+ JCE->emitULEB128Bytes(0);
+ }
+
+ // Indicate locations of function specific callee saved registers in
+ // frame.
+ EmitFrameMoves((intptr_t)StartFunction, MMI->getFrameMoves());
+
+ JCE->emitAlignment(PointerSize);
+
+ // Indicate the size of the table
+ JCE->emitInt32At((uintptr_t*)StartEHPtr,
+ (uintptr_t)((unsigned char*)JCE->getCurrentPCValue() -
+ StartEHPtr));
+
+ // Double zeroes for the unwind runtime
+ if (PointerSize == 8) {
+ JCE->emitInt64(0);
+ JCE->emitInt64(0);
+ } else {
+ JCE->emitInt32(0);
+ JCE->emitInt32(0);
+ }
+
+
+ return StartEHPtr;
+}
+
+unsigned JITDwarfEmitter::GetDwarfTableSizeInBytes(MachineFunction& F,
+ JITCodeEmitter& jce,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) {
+ const TargetMachine& TM = F.getTarget();
+ TD = TM.getTargetData();
+ needsIndirectEncoding = TM.getTargetAsmInfo()->getNeedsIndirectEncoding();
+ stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
+ RI = TM.getRegisterInfo();
+ JCE = &jce;
+ unsigned FinalSize = 0;
+
+ FinalSize += GetExceptionTableSizeInBytes(&F);
+
+ const std::vector<Function *> Personalities = MMI->getPersonalities();
+ FinalSize +=
+ GetCommonEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()]);
+
+ FinalSize += GetEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()],
+ StartFunction);
+
+ return FinalSize;
+}
+
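+// The Get*SizeInBytes helpers below deliberately over-estimate: their
+// results are used to size the buffers the tables are later emitted into,
+// so they must never come up short.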
+/// RoundUpToAlign - Add the specified alignment to FinalSize and return
+/// the new value.
+static unsigned RoundUpToAlign(unsigned FinalSize, unsigned Alignment) {
+ if (Alignment == 0) Alignment = 1;
+ // Since we do not know where the buffer will be allocated, be pessimistic.
+ return FinalSize + Alignment;
+}
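+// For example, RoundUpToAlign(10, 16) yields 26 rather than 16: because the
+// buffer's base address is not known yet, a full 16 bytes of padding is
+// budgeted instead of the 6 bytes a 16-byte-aligned base would need.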
+
+unsigned
+JITDwarfEmitter::GetEHFrameSizeInBytes(const Function* Personality,
+ unsigned char* StartFunction) const {
+ unsigned PointerSize = TD->getPointerSize();
+ unsigned FinalSize = 0;
+ // EH frame header.
+ FinalSize += PointerSize;
+ // FDE CIE Offset
+ FinalSize += 3 * PointerSize;
+ // If there is a personality and landing pads then point to the language
+ // specific data area in the exception table.
+ if (MMI->getPersonalityIndex()) {
+ FinalSize += TargetAsmInfo::getULEB128Size(4);
+ FinalSize += PointerSize;
+ } else {
+ FinalSize += TargetAsmInfo::getULEB128Size(0);
+ }
+
+ // Indicate locations of function specific callee saved registers in
+ // frame.
+ FinalSize += GetFrameMovesSizeInBytes((intptr_t)StartFunction,
+ MMI->getFrameMoves());
+
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+
+ // Double zeroes for the unwind runtime
+ FinalSize += 2 * PointerSize;
+
+ return FinalSize;
+}
+
+unsigned JITDwarfEmitter::GetCommonEHFrameSizeInBytes(const Function* Personality)
+ const {
+
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+ unsigned FinalSize = 0;
+ // EH Common Frame header
+ FinalSize += PointerSize;
+ FinalSize += 4;
+ FinalSize += 1;
+ FinalSize += Personality ? 5 : 3; // "zPLR" or "zR"
+ FinalSize += TargetAsmInfo::getULEB128Size(1);
+ FinalSize += TargetAsmInfo::getSLEB128Size(stackGrowth);
+ FinalSize += 1;
+
+ if (Personality) {
+ FinalSize += TargetAsmInfo::getULEB128Size(7);
+
+ // Encoding
+ FinalSize+= 1;
+ //Personality
+ FinalSize += PointerSize;
+
+ FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+ FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+
+ } else {
+ FinalSize += TargetAsmInfo::getULEB128Size(1);
+ FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
+ }
+
+ std::vector<MachineMove> Moves;
+ RI->getInitialFrameState(Moves);
+ FinalSize += GetFrameMovesSizeInBytes(0, Moves);
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+ return FinalSize;
+}
+
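+// GetFrameMovesSizeInBytes mirrors EmitFrameMoves: each '++FinalSize' below
+// accounts for a one-byte DW_CFA opcode, and the LEB128 helpers account for
+// that opcode's operands, so this logic must be kept in step with the
+// emitter.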
+unsigned
+JITDwarfEmitter::GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const {
+ unsigned PointerSize = TD->getPointerSize();
+ int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
+ PointerSize : -PointerSize;
+ bool IsLocal = BaseLabelPtr;
+ unsigned FinalSize = 0;
+
+ for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
+ const MachineMove &Move = Moves[i];
+ unsigned LabelID = Move.getLabelID();
+
+ if (LabelID) {
+ LabelID = MMI->MappedLabel(LabelID);
+
+ // Throw out move if the label is invalid.
+ if (!LabelID) continue;
+ }
+
+ intptr_t LabelPtr = 0;
+ if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+
+ const MachineLocation &Dst = Move.getDestination();
+ const MachineLocation &Src = Move.getSource();
+
+ // Advance row if new location.
+ if (BaseLabelPtr && LabelID && (BaseLabelPtr != LabelPtr || !IsLocal)) {
+ FinalSize++;
+ FinalSize += PointerSize;
+ BaseLabelPtr = LabelPtr;
+ IsLocal = true;
+ }
+
+ // If advancing cfa.
+ if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
+ if (!Src.isReg()) {
+ if (Src.getReg() == MachineLocation::VirtualFP) {
+ ++FinalSize;
+ } else {
+ ++FinalSize;
+ unsigned RegNum = RI->getDwarfRegNum(Src.getReg(), true);
+ FinalSize += TargetAsmInfo::getULEB128Size(RegNum);
+ }
+
+ int Offset = -Src.getOffset();
+
+ FinalSize += TargetAsmInfo::getULEB128Size(Offset);
+ } else {
+ assert(0 && "Machine move no supported yet.");
+ }
+ } else if (Src.isReg() &&
+ Src.getReg() == MachineLocation::VirtualFP) {
+ if (Dst.isReg()) {
+ ++FinalSize;
+ unsigned RegNum = RI->getDwarfRegNum(Dst.getReg(), true);
+ FinalSize += TargetAsmInfo::getULEB128Size(RegNum);
+ } else {
+ assert(0 && "Machine move no supported yet.");
+ }
+ } else {
+ unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
+ int Offset = Dst.getOffset() / stackGrowth;
+
+ if (Offset < 0) {
+ ++FinalSize;
+ FinalSize += TargetAsmInfo::getULEB128Size(Reg);
+ FinalSize += TargetAsmInfo::getSLEB128Size(Offset);
+ } else if (Reg < 64) {
+ ++FinalSize;
+ FinalSize += TargetAsmInfo::getULEB128Size(Offset);
+ } else {
+ ++FinalSize;
+ FinalSize += TargetAsmInfo::getULEB128Size(Reg);
+ FinalSize += TargetAsmInfo::getULEB128Size(Offset);
+ }
+ }
+ }
+ return FinalSize;
+}
+
+unsigned
+JITDwarfEmitter::GetExceptionTableSizeInBytes(MachineFunction* MF) const {
+ unsigned FinalSize = 0;
+
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
+
+ const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
+ const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ if (PadInfos.empty()) return 0;
+
+ // Sort the landing pads in order of their type ids. This is used to fold
+ // duplicate actions.
+ SmallVector<const LandingPadInfo *, 64> LandingPads;
+ LandingPads.reserve(PadInfos.size());
+ for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
+ LandingPads.push_back(&PadInfos[i]);
+ std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
+
+ // Negative type ids index into FilterIds, positive type ids index into
+ // TypeInfos. The value written for a positive type id is just the type
+ // id itself. For a negative type id, however, the value written is the
+ // (negative) byte offset of the corresponding FilterIds entry. The byte
+ // offset is usually equal to the type id, because the FilterIds entries
+ // are written using a variable width encoding which outputs one byte per
+ // entry as long as the value written is not too large, but can differ.
+ // This kind of complication does not occur for positive type ids because
+ // type infos are output using a fixed width encoding.
+ // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
+ SmallVector<int, 16> FilterOffsets;
+ FilterOffsets.reserve(FilterIds.size());
+ int Offset = -1;
+ for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
+ E = FilterIds.end(); I != E; ++I) {
+ FilterOffsets.push_back(Offset);
+ Offset -= TargetAsmInfo::getULEB128Size(*I);
+ }
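+ // For example, with FilterIds = {1, 2, 3}, each a one-byte ULEB128,
+ // FilterOffsets becomes {-1, -2, -3}: entry i begins i + 1 bytes before
+ // the end of the encoded filter list.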
+
+ // Compute the actions table and gather the first action index for each
+ // landing pad site.
+ SmallVector<ActionEntry, 32> Actions;
+ SmallVector<unsigned, 64> FirstActions;
+ FirstActions.reserve(LandingPads.size());
+
+ int FirstAction = 0;
+ unsigned SizeActions = 0;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LP = LandingPads[i];
+ const std::vector<int> &TypeIds = LP->TypeIds;
+ const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
+ unsigned SizeSiteActions = 0;
+
+ if (NumShared < TypeIds.size()) {
+ unsigned SizeAction = 0;
+ ActionEntry *PrevAction = 0;
+
+ if (NumShared) {
+ const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
+ assert(Actions.size());
+ PrevAction = &Actions.back();
+ SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
+ TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ for (unsigned j = NumShared; j != SizePrevIds; ++j) {
+ SizeAction -= TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
+ SizeAction += -PrevAction->NextAction;
+ PrevAction = PrevAction->Previous;
+ }
+ }
+
+ // Compute the actions.
+ for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
+ int TypeID = TypeIds[I];
+ assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
+ int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
+ unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);
+
+ int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
+ SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
+ SizeSiteActions += SizeAction;
+
+ ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
+ Actions.push_back(Action);
+
+ PrevAction = &Actions.back();
+ }
+
+ // Record the first action of the landing pad site.
+ FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
+ } // else identical - re-use previous FirstAction
+
+ FirstActions.push_back(FirstAction);
+
+ // Compute this site's contribution to the total size.
+ SizeActions += SizeSiteActions;
+ }
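+ // Each action is later emitted as a pair of SLEB128s, ValueForTypeID and
+ // NextAction; a non-zero NextAction is a self-relative backward offset
+ // chaining to the previous action, 0 ends the chain, and FirstActions
+ // records each pad's 1-based byte offset into the packed action table.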
+
+ // Compute the call-site table. Entries must be ordered by address.
+ SmallVector<CallSiteEntry, 64> CallSites;
+
+ RangeMapType PadMap;
+ for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
+ const LandingPadInfo *LandingPad = LandingPads[i];
+ for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
+ unsigned BeginLabel = LandingPad->BeginLabels[j];
+ assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
+ PadRange P = { i, j };
+ PadMap[BeginLabel] = P;
+ }
+ }
+
+ bool MayThrow = false;
+ unsigned LastLabel = 0;
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+ I != E; ++I) {
+ for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
+ MI != E; ++MI) {
+ if (!MI->isLabel()) {
+ MayThrow |= MI->getDesc().isCall();
+ continue;
+ }
+
+ unsigned BeginLabel = MI->getOperand(0).getImm();
+ assert(BeginLabel && "Invalid label!");
+
+ if (BeginLabel == LastLabel)
+ MayThrow = false;
+
+ RangeMapType::iterator L = PadMap.find(BeginLabel);
+
+ if (L == PadMap.end())
+ continue;
+
+ PadRange P = L->second;
+ const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
+
+ assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
+ "Inconsistent landing pad map!");
+
+ // If some instruction between the previous try-range and this one may
+ // throw, create a call-site entry with no landing pad for the region
+ // between the try-ranges.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ LastLabel = LandingPad->EndLabels[P.RangeIndex];
+ CallSiteEntry Site = {BeginLabel, LastLabel,
+ LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
+
+ assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
+ "Invalid landing pad!");
+
+ // Try to merge with the previous call-site.
+ if (CallSites.size()) {
+ CallSiteEntry &Prev = CallSites.back();
+ if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
+ // Extend the range of the previous entry.
+ Prev.EndLabel = Site.EndLabel;
+ continue;
+ }
+ }
+
+ // Otherwise, create a new call-site.
+ CallSites.push_back(Site);
+ }
+ }
+ // If some instruction between the previous try-range and the end of the
+ // function may throw, create a call-site entry with no landing pad for the
+ // region following the try-range.
+ if (MayThrow) {
+ CallSiteEntry Site = {LastLabel, 0, 0, 0};
+ CallSites.push_back(Site);
+ }
+
+ // Final tallies.
+ unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
+ sizeof(int32_t) + // Site length.
+ sizeof(int32_t)); // Landing pad.
+ for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
+ SizeSites += TargetAsmInfo::getULEB128Size(CallSites[i].Action);
+
+ unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
+
+ unsigned TypeOffset = sizeof(int8_t) + // Call site format
+ // Call-site table length
+ TargetAsmInfo::getULEB128Size(SizeSites) +
+ SizeSites + SizeActions + SizeTypes;
+
+ unsigned TotalSize = sizeof(int8_t) + // LPStart format
+ sizeof(int8_t) + // TType format
+ TargetAsmInfo::getULEB128Size(TypeOffset) + // TType base offset
+ TypeOffset;
+
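+ // (4 - TotalSize) & 3 is the padding needed to round TotalSize up to a
+ // multiple of 4; e.g. TotalSize == 6 gives SizeAlign == 2.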
+ unsigned SizeAlign = (4 - TotalSize) & 3;
+
+ // Begin the exception table.
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+ for (unsigned i = 0; i != SizeAlign; ++i) {
+ ++FinalSize;
+ }
+
+ unsigned PointerSize = TD->getPointerSize();
+
+ // Emit the header.
+ ++FinalSize;
+ // Asm->EOL("LPStart format (DW_EH_PE_omit)");
+ ++FinalSize;
+ // Asm->EOL("TType format (DW_EH_PE_absptr)");
+ ++FinalSize;
+ // Asm->EOL("TType base offset");
+ ++FinalSize;
+ // Asm->EOL("Call site format (DW_EH_PE_udata4)");
+ ++FinalSize;
+ // Asm->EOL("Call-site table length");
+
+ // Emit the landing pad site information.
+ for (unsigned i = 0; i < CallSites.size(); ++i) {
+ CallSiteEntry &S = CallSites[i];
+
+ // Asm->EOL("Region start");
+ FinalSize += PointerSize;
+
+ //Asm->EOL("Region length");
+ FinalSize += PointerSize;
+
+ // Asm->EOL("Landing pad");
+ FinalSize += PointerSize;
+
+ FinalSize += TargetAsmInfo::getULEB128Size(S.Action);
+ // Asm->EOL("Action");
+ }
+
+ // Emit the actions.
+ for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
+ ActionEntry &Action = Actions[I];
+
+ //Asm->EOL("TypeInfo index");
+ FinalSize += TargetAsmInfo::getSLEB128Size(Action.ValueForTypeID);
+ //Asm->EOL("Next action");
+ FinalSize += TargetAsmInfo::getSLEB128Size(Action.NextAction);
+ }
+
+ // Emit the type ids.
+ for (unsigned M = TypeInfos.size(); M; --M) {
+ // Asm->EOL("TypeInfo");
+ FinalSize += PointerSize;
+ }
+
+ // Emit the filter typeids.
+ for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
+ unsigned TypeID = FilterIds[j];
+ FinalSize += TargetAsmInfo::getULEB128Size(TypeID);
+ //Asm->EOL("Filter TypeInfo index");
+ }
+
+ FinalSize = RoundUpToAlign(FinalSize, 4);
+
+ return FinalSize;
+}
diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
new file mode 100644
index 0000000..9120ed4
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
@@ -0,0 +1,87 @@
+//===------ JITDwarfEmitter.h - Write dwarf tables into memory ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITDwarfEmitter object that is used by the JIT to
+// write dwarf tables to memory.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H
+#define LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H
+
+namespace llvm {
+
+class Function;
+class JIT;
+class JITCodeEmitter;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineMove;
+class TargetData;
+class TargetMachine;
+class TargetRegisterInfo;
+
+class JITDwarfEmitter {
+ const TargetData* TD;
+ JITCodeEmitter* JCE;
+ const TargetRegisterInfo* RI;
+ MachineModuleInfo* MMI;
+ JIT& Jit;
+ bool needsIndirectEncoding;
+ bool stackGrowthDirection;
+
+ unsigned char* EmitExceptionTable(MachineFunction* MF,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction) const;
+
+ void EmitFrameMoves(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const;
+
+ unsigned char* EmitCommonEHFrame(const Function* Personality) const;
+
+ unsigned char* EmitEHFrame(const Function* Personality,
+ unsigned char* StartCommonPtr,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction,
+ unsigned char* ExceptionTable) const;
+
+ unsigned GetExceptionTableSizeInBytes(MachineFunction* MF) const;
+
+ unsigned
+ GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
+ const std::vector<MachineMove> &Moves) const;
+
+ unsigned GetCommonEHFrameSizeInBytes(const Function* Personality) const;
+
+ unsigned GetEHFrameSizeInBytes(const Function* Personality,
+ unsigned char* StartFunction) const;
+
+public:
+
+ JITDwarfEmitter(JIT& jit);
+
+ unsigned char* EmitDwarfTable(MachineFunction& F,
+ JITCodeEmitter& JCE,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction);
+
+
+ unsigned GetDwarfTableSizeInBytes(MachineFunction& F,
+ JITCodeEmitter& JCE,
+ unsigned char* StartFunction,
+ unsigned char* EndFunction);
+
+ void setModuleInfo(MachineModuleInfo* Info) {
+ MMI = Info;
+ }
+};
+
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTION_ENGINE_JIT_DWARFEMITTER_H
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
new file mode 100644
index 0000000..d3b0820
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -0,0 +1,1615 @@
+//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a MachineCodeEmitter object that is used by the JIT to
+// write machine code to memory and remember where relocatable values are.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "jit"
+#include "JIT.h"
+#include "JITDwarfEmitter.h"
+#include "llvm/Constants.h"
+#include "llvm/Module.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/CodeGen/JITCodeEmitter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRelocation.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/CodeGen/MachineCodeInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetJITInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/System/Disassembler.h"
+#include "llvm/System/Memory.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include <algorithm>
+#ifndef NDEBUG
+#include <iomanip>
+#endif
+using namespace llvm;
+
+STATISTIC(NumBytes, "Number of bytes of machine code compiled");
+STATISTIC(NumRelos, "Number of relocations applied");
+static JIT *TheJIT = 0;
+
+
+//===----------------------------------------------------------------------===//
+// JIT lazy compilation code.
+//
+namespace {
+ class JITResolverState {
+ public:
+ typedef std::map<AssertingVH<Function>, void*> FunctionToStubMapTy;
+ typedef std::map<void*, Function*> StubToFunctionMapTy;
+ typedef std::map<AssertingVH<GlobalValue>, void*> GlobalToIndirectSymMapTy;
+ private:
+ /// FunctionToStubMap - Keep track of the stub created for a particular
+ /// function so that we can reuse them if necessary.
+ FunctionToStubMapTy FunctionToStubMap;
+
+ /// StubToFunctionMap - Keep track of the function that each stub
+ /// corresponds to.
+ StubToFunctionMapTy StubToFunctionMap;
+
+ /// GlobalToIndirectSymMap - Keep track of the indirect symbol created for a
+ /// particular GlobalVariable so that we can reuse them if necessary.
+ GlobalToIndirectSymMapTy GlobalToIndirectSymMap;
+
+ public:
+ FunctionToStubMapTy& getFunctionToStubMap(const MutexGuard& locked) {
+ assert(locked.holds(TheJIT->lock));
+ return FunctionToStubMap;
+ }
+
+ StubToFunctionMapTy& getStubToFunctionMap(const MutexGuard& locked) {
+ assert(locked.holds(TheJIT->lock));
+ return StubToFunctionMap;
+ }
+
+ GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& locked) {
+ assert(locked.holds(TheJIT->lock));
+ return GlobalToIndirectSymMap;
+ }
+ };
+
+ /// JITResolver - Keep track of, and resolve, call sites for functions that
+ /// have not yet been compiled.
+ class JITResolver {
+ typedef JITResolverState::FunctionToStubMapTy FunctionToStubMapTy;
+ typedef JITResolverState::StubToFunctionMapTy StubToFunctionMapTy;
+ typedef JITResolverState::GlobalToIndirectSymMapTy GlobalToIndirectSymMapTy;
+
+ /// LazyResolverFn - The target lazy resolver function that we actually
+ /// rewrite instructions to use.
+ TargetJITInfo::LazyResolverFn LazyResolverFn;
+
+ JITResolverState state;
+
+ /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
+ /// external functions.
+ std::map<void*, void*> ExternalFnToStubMap;
+
+ /// revGOTMap - map addresses to indexes in the GOT
+ std::map<void*, unsigned> revGOTMap;
+ unsigned nextGOTIndex;
+
+ static JITResolver *TheJITResolver;
+ public:
+ explicit JITResolver(JIT &jit) : nextGOTIndex(0) {
+ TheJIT = &jit;
+
+ LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
+ assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
+ TheJITResolver = this;
+ }
+
+ ~JITResolver() {
+ TheJITResolver = 0;
+ }
+
+ /// getFunctionStubIfAvailable - This returns a pointer to a function stub
+ /// if it has already been created.
+ void *getFunctionStubIfAvailable(Function *F);
+
+ /// getFunctionStub - This returns a pointer to a function stub, creating
+ /// one on demand as needed.
+ void *getFunctionStub(Function *F);
+
+ /// getExternalFunctionStub - Return a stub for the function at the
+ /// specified address, created lazily on demand.
+ void *getExternalFunctionStub(void *FnAddr);
+
+ /// getGlobalValueIndirectSym - Return an indirect symbol containing the
+ /// specified GV address.
+ void *getGlobalValueIndirectSym(GlobalValue *V, void *GVAddress);
+
+ /// AddCallbackAtLocation - If the target is capable of rewriting an
+ /// instruction without the use of a stub, record the location of the use so
+ /// we know which function is being used at the location.
+ void *AddCallbackAtLocation(Function *F, void *Location) {
+ MutexGuard locked(TheJIT->lock);
+ // Record which function is expected at this location, then hand back the
+ // target-specific lazy resolver function as the callback target.
+ state.getStubToFunctionMap(locked)[Location] = F;
+ return (void*)(intptr_t)LazyResolverFn;
+ }
+
+ void getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
+ SmallVectorImpl<void*> &Ptrs);
+
+ GlobalValue *invalidateStub(void *Stub);
+
+ /// getGOTIndexForAddr - Return a new or existing index in the GOT for
+ /// an address. This function only manages slots, it does not manage the
+ /// contents of the slots or the memory associated with the GOT.
+ unsigned getGOTIndexForAddr(void *addr);
+
+ /// JITCompilerFn - This function is called to resolve a stub to a compiled
+ /// address. If the LLVM Function corresponding to the stub has not yet
+ /// been compiled, this function compiles it first.
+ static void *JITCompilerFn(void *Stub);
+ };
+}
+
+JITResolver *JITResolver::TheJITResolver = 0;
+
+/// getFunctionStubIfAvailable - This returns a pointer to a function stub
+/// if it has already been created.
+void *JITResolver::getFunctionStubIfAvailable(Function *F) {
+ MutexGuard locked(TheJIT->lock);
+
+ // If we already have a stub for this function, recycle it.
+ void *&Stub = state.getFunctionToStubMap(locked)[F];
+ return Stub;
+}
+
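+// In outline, lazy compilation works like this: getFunctionStub emits a
+// small stub whose initial target is the target-specific lazy resolver; the
+// first call through the stub lands in JITCompilerFn, which compiles the
+// real function so the resolver can patch the call to reach it directly.
+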
+/// getFunctionStub - This returns a pointer to a function stub, creating
+/// one on demand as needed.
+void *JITResolver::getFunctionStub(Function *F) {
+ MutexGuard locked(TheJIT->lock);
+
+ // If we already have a stub for this function, recycle it.
+ void *&Stub = state.getFunctionToStubMap(locked)[F];
+ if (Stub) return Stub;
+
+ // Call the lazy resolver function unless we are JIT'ing non-lazily, in which
+ // case we must resolve the symbol now.
+ void *Actual = TheJIT->isLazyCompilationDisabled()
+ ? (void *)0 : (void *)(intptr_t)LazyResolverFn;
+
+ // If this is an external declaration, attempt to resolve the address now
+ // to place in the stub.
+ if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) {
+ Actual = TheJIT->getPointerToFunction(F);
+
+ // If we resolved the symbol to a null address (e.g. a weak external),
+ // don't emit a stub. Return a null pointer to the application. If dlsym
+ // stubs are enabled, not being able to resolve the address is not
+ // meaningful.
+ if (!Actual && !TheJIT->areDlsymStubsEnabled()) return 0;
+ }
+
+ // Codegen a new stub, calling the lazy resolver or the actual address of the
+ // external function, if it was resolved.
+ Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual,
+ *TheJIT->getCodeEmitter());
+
+ if (Actual != (void*)(intptr_t)LazyResolverFn) {
+ // If we are getting the stub for an external function, we really want the
+ // address of the stub in the GlobalAddressMap for the JIT, not the address
+ // of the external function.
+ TheJIT->updateGlobalMapping(F, Stub);
+ }
+
+ DOUT << "JIT: Stub emitted at [" << Stub << "] for function '"
+ << F->getName() << "'\n";
+
+ // Finally, keep track of the stub-to-Function mapping so that the
+ // JITCompilerFn knows which function to compile!
+ state.getStubToFunctionMap(locked)[Stub] = F;
+
+ // If we are JIT'ing non-lazily but need to call a function that does not
+ // exist yet, add it to the JIT's work list so that we can fill in the stub
+ // address later.
+ if (!Actual && TheJIT->isLazyCompilationDisabled())
+ if (!F->isDeclaration() || F->hasNotBeenReadFromBitcode())
+ TheJIT->addPendingFunction(F);
+
+ return Stub;
+}
+
+/// getGlobalValueIndirectSym - Return a lazy pointer containing the specified
+/// GV address.
+void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) {
+ MutexGuard locked(TheJIT->lock);
+
+ // If we already have a stub for this global variable, recycle it.
+ void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV];
+ if (IndirectSym) return IndirectSym;
+
+ // Otherwise, codegen a new indirect symbol.
+ IndirectSym = TheJIT->getJITInfo().emitGlobalValueIndirectSym(GV, GVAddress,
+ *TheJIT->getCodeEmitter());
+
+ DOUT << "JIT: Indirect symbol emitted at [" << IndirectSym << "] for GV '"
+ << GV->getName() << "'\n";
+
+ return IndirectSym;
+}
+
+/// getExternalFunctionStub - Return a stub for the function at the
+/// specified address, created lazily on demand.
+void *JITResolver::getExternalFunctionStub(void *FnAddr) {
+ // If we already have a stub for this function, recycle it.
+ void *&Stub = ExternalFnToStubMap[FnAddr];
+ if (Stub) return Stub;
+
+ Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr,
+ *TheJIT->getCodeEmitter());
+
+ DOUT << "JIT: Stub emitted at [" << Stub
+ << "] for external function at '" << FnAddr << "'\n";
+ return Stub;
+}
+
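+// Note that GOT index 0 doubles as "not present" (the map's default value),
+// so the first real index handed out below is 1.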
+unsigned JITResolver::getGOTIndexForAddr(void* addr) {
+ unsigned idx = revGOTMap[addr];
+ if (!idx) {
+ idx = ++nextGOTIndex;
+ revGOTMap[addr] = idx;
+ DOUT << "JIT: Adding GOT entry " << idx << " for addr [" << addr << "]\n";
+ }
+ return idx;
+}
+
+void JITResolver::getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
+ SmallVectorImpl<void*> &Ptrs) {
+ MutexGuard locked(TheJIT->lock);
+
+ FunctionToStubMapTy &FM = state.getFunctionToStubMap(locked);
+ GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked);
+
+ for (FunctionToStubMapTy::iterator i = FM.begin(), e = FM.end(); i != e; ++i){
+ Function *F = i->first;
+ if (F->isDeclaration() && F->hasExternalLinkage()) {
+ GVs.push_back(i->first);
+ Ptrs.push_back(i->second);
+ }
+ }
+ for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end();
+ i != e; ++i) {
+ GVs.push_back(i->first);
+ Ptrs.push_back(i->second);
+ }
+}
+
+GlobalValue *JITResolver::invalidateStub(void *Stub) {
+ MutexGuard locked(TheJIT->lock);
+
+ FunctionToStubMapTy &FM = state.getFunctionToStubMap(locked);
+ StubToFunctionMapTy &SM = state.getStubToFunctionMap(locked);
+ GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked);
+
+ // Look up the cheap way first, to see if it's a function stub we are
+ // invalidating. If so, remove it from both the forward and reverse maps.
+ if (SM.find(Stub) != SM.end()) {
+ Function *F = SM[Stub];
+ SM.erase(Stub);
+ FM.erase(F);
+ return F;
+ }
+
+ // Otherwise, it might be an indirect symbol stub. Find it and remove it.
+ for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end();
+ i != e; ++i) {
+ if (i->second != Stub)
+ continue;
+ GlobalValue *GV = i->first;
+ GM.erase(i);
+ return GV;
+ }
+
+ // Lastly, check to see if it's in the ExternalFnToStubMap.
+ for (std::map<void *, void *>::iterator i = ExternalFnToStubMap.begin(),
+ e = ExternalFnToStubMap.end(); i != e; ++i) {
+ if (i->second != Stub)
+ continue;
+ ExternalFnToStubMap.erase(i);
+ break;
+ }
+
+ return 0;
+}
+
+/// JITCompilerFn - This function is called when a lazy compilation stub has
+/// been entered. It looks up which function this stub corresponds to, compiles
+/// it if necessary, then returns the resultant function pointer.
+void *JITResolver::JITCompilerFn(void *Stub) {
+ JITResolver &JR = *TheJITResolver;
+
+ Function* F = 0;
+ void* ActualPtr = 0;
+
+ {
+ // Only lock for getting the Function. The call getPointerToFunction made
+ // in this function might trigger function materializing, which requires
+ // JIT lock to be unlocked.
+ MutexGuard locked(TheJIT->lock);
+
+ // The address given to us for the stub may not be exactly right; it might
+ // be a little bit past the start of the stub. As such, use upper_bound to
+ // find it.
+ StubToFunctionMapTy::iterator I =
+ JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
+ assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
+ "This is not a known stub!");
+ F = (--I)->second;
+ ActualPtr = I->first;
+ }
+
+ // If we have already code-generated the function, just return the address.
+ void *Result = TheJIT->getPointerToGlobalIfAvailable(F);
+
+ if (!Result) {
+ // Otherwise we don't have it, do lazy compilation now.
+
+ // If lazy compilation is disabled, emit a useful error message and abort.
+ if (TheJIT->isLazyCompilationDisabled()) {
+ cerr << "LLVM JIT requested to do lazy compilation of function '"
+ << F->getName() << "' when lazy compiles are disabled!\n";
+ abort();
+ }
+
+ // We might like to remove the stub from the StubToFunction map.
+ // We can't do that! Multiple threads could be stuck waiting to acquire the
+ // lock above. As soon as the first thread finishes compiling the function,
+ // the next one will be released, and it needs to be able to find the
+ // function it is supposed to call.
+ //JR.state.getStubToFunctionMap(locked).erase(I);
+
+ DOUT << "JIT: Lazily resolving function '" << F->getName()
+ << "' In stub ptr = " << Stub << " actual ptr = "
+ << ActualPtr << "\n";
+
+ Result = TheJIT->getPointerToFunction(F);
+ }
+
+ // Reacquire the lock to erase the stub in the map.
+ MutexGuard locked(TheJIT->lock);
+
+ // We don't need to reuse this stub in the future, as F is now compiled.
+ JR.state.getFunctionToStubMap(locked).erase(F);
+
+ // FIXME: We could rewrite all references to this stub if we knew them.
+
+ // What we will do is set the compiled function address to map to the
+ // same GOT entry as the stub so that later clients may update the GOT
+ // if they see it still using the stub address.
+ // Note: this is done so the Resolver doesn't have to manage GOT memory.
+ // Do this without allocating map space if the target isn't using a GOT.
+ if (JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
+ JR.revGOTMap[Result] = JR.revGOTMap[Stub];
+
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Index Support
+
+// On MacOS we generate an index of currently JIT'd functions so that
+// performance tools can determine a symbol name and accurate code range for a
+// PC value. Because performance tools are generally asynchronous, the code
+// below is written with the hope that it could be interrupted at any time and
+// have useful answers. However, we don't go crazy with atomic operations;
+// we just make a "reasonable effort".
+#ifdef __APPLE__
+#define ENABLE_JIT_SYMBOL_TABLE 0
+#endif
+
+/// JitSymbolEntry - Each function that is JIT compiled results in one of these
+/// being added to an array of symbols. This indicates the name of the function
+/// as well as the address range it occupies. This allows the client to map
+/// from a PC value to the name of the function.
+struct JitSymbolEntry {
+ const char *FnName; // FnName - a strdup'd string.
+ void *FnStart;
+ intptr_t FnSize;
+};
+
+
+struct JitSymbolTable {
+ /// NextPtr - This forms a linked list of JitSymbolTable entries. This
+ /// pointer is not used right now, but might be used in the future. Consider
+ /// it reserved for future use.
+ JitSymbolTable *NextPtr;
+
+ /// Symbols - This is an array of JitSymbolEntry entries. Only the first
+ /// 'NumSymbols' symbols are valid.
+ JitSymbolEntry *Symbols;
+
+ /// NumSymbols - This indicates the number of entries in the Symbols array that
+ /// are valid.
+ unsigned NumSymbols;
+
+ /// NumAllocated - This indicates the amount of space we have in the Symbols
+ /// array. This is a private field that should not be read by external tools.
+ unsigned NumAllocated;
+};
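+// When ENABLE_JIT_SYMBOL_TABLE is turned on, an external tool can locate
+// __jitSymbolTable in the process and scan Symbols[0..NumSymbols) for the
+// entry whose [FnStart, FnStart + FnSize) range covers a sampled PC.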
+
+#if ENABLE_JIT_SYMBOL_TABLE
+JitSymbolTable *__jitSymbolTable;
+#endif
+
+static void AddFunctionToSymbolTable(const char *FnName,
+ void *FnStart, intptr_t FnSize) {
+ assert(FnName != 0 && FnStart != 0 && "Bad symbol to add");
+ JitSymbolTable **SymTabPtrPtr = 0;
+#if !ENABLE_JIT_SYMBOL_TABLE
+ return;
+#else
+ SymTabPtrPtr = &__jitSymbolTable;
+#endif
+
+ // If this is the first entry in the symbol table, allocate the
+ // JitSymbolTable itself.
+ if (*SymTabPtrPtr == 0) {
+ JitSymbolTable *New = new JitSymbolTable();
+ New->NextPtr = 0;
+ New->Symbols = 0;
+ New->NumSymbols = 0;
+ New->NumAllocated = 0;
+ *SymTabPtrPtr = New;
+ }
+
+ JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
+
+ // If we don't have enough space in the table, reallocate it.
+ if (SymTabPtr->NumSymbols >= SymTabPtr->NumAllocated) {
+ // Grow the array geometrically, starting at 64 entries.
+ unsigned NewSize = std::max(64U, SymTabPtr->NumAllocated*2);
+ JitSymbolEntry *NewSymbols = new JitSymbolEntry[NewSize];
+ JitSymbolEntry *OldSymbols = SymTabPtr->Symbols;
+
+ // Copy the old entries over.
+ memcpy(NewSymbols, OldSymbols, SymTabPtr->NumSymbols*sizeof(OldSymbols[0]));
+
+ // Swap the new symbols in, delete the old ones.
+ SymTabPtr->Symbols = NewSymbols;
+ SymTabPtr->NumAllocated = NewSize;
+ delete [] OldSymbols;
+ }
+
+ // Now that we have enough space, tack the new entry onto the end of the array.
+ JitSymbolEntry &Entry = SymTabPtr->Symbols[SymTabPtr->NumSymbols];
+ Entry.FnName = strdup(FnName);
+ Entry.FnStart = FnStart;
+ Entry.FnSize = FnSize;
+ ++SymTabPtr->NumSymbols;
+}
+
+static void RemoveFunctionFromSymbolTable(void *FnStart) {
+ assert(FnStart && "Invalid function pointer");
+ JitSymbolTable **SymTabPtrPtr = 0;
+#if !ENABLE_JIT_SYMBOL_TABLE
+ return;
+#else
+ SymTabPtrPtr = &__jitSymbolTable;
+#endif
+
+ JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
+ JitSymbolEntry *Symbols = SymTabPtr->Symbols;
+
+ // Scan the table to find its index. The table is not sorted, so do a linear
+ // scan.
+ unsigned Index;
+ for (Index = 0; Symbols[Index].FnStart != FnStart; ++Index)
+ assert(Index != SymTabPtr->NumSymbols && "Didn't find function!");
+
+ // Once we have the index, nuke this entry by overwriting it with the
+ // entry at the end of the array, making the last entry redundant.
+ const char *OldName = Symbols[Index].FnName;
+ Symbols[Index] = Symbols[SymTabPtr->NumSymbols-1];
+ free((void*)OldName);
+
+ // Drop the number of symbols in the table.
+ --SymTabPtr->NumSymbols;
+
+ // Finally, if we deleted the final symbol, deallocate the table itself.
+ if (SymTabPtr->NumSymbols != 0)
+ return;
+
+ *SymTabPtrPtr = 0;
+ delete [] Symbols;
+ delete SymTabPtr;
+}
+
+//===----------------------------------------------------------------------===//
+// JITEmitter code.
+//
+namespace {
+ /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
+ /// used to output functions to memory for execution.
+ class JITEmitter : public JITCodeEmitter {
+ JITMemoryManager *MemMgr;
+
+ // When outputting a function stub in the context of some other function, we
+ // save BufferBegin/BufferEnd/CurBufferPtr here.
+ uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
+
+ /// Relocations - These are the relocations that the function needs, as
+ /// emitted.
+ std::vector<MachineRelocation> Relocations;
+
+ /// MBBLocations - This vector is a mapping from MBB ID's to their address.
+ /// It is filled in by the StartMachineBasicBlock callback and queried by
+ /// the getMachineBasicBlockAddress callback.
+ std::vector<uintptr_t> MBBLocations;
+
+ /// ConstantPool - The constant pool for the current function.
+ ///
+ MachineConstantPool *ConstantPool;
+
+ /// ConstantPoolBase - A pointer to the first entry in the constant pool.
+ ///
+ void *ConstantPoolBase;
+
+ /// ConstPoolAddresses - Addresses of individual constant pool entries.
+ ///
+ SmallVector<uintptr_t, 8> ConstPoolAddresses;
+
+ /// JumpTable - The jump tables for the current function.
+ ///
+ MachineJumpTableInfo *JumpTable;
+
+ /// JumpTableBase - A pointer to the first entry in the jump table.
+ ///
+ void *JumpTableBase;
+
+ /// Resolver - This contains info about the currently resolved functions.
+ JITResolver Resolver;
+
+ /// DE - The dwarf emitter for the jit.
+ JITDwarfEmitter *DE;
+
+ /// LabelLocations - This vector is a mapping from Label ID's to their
+ /// address.
+ std::vector<uintptr_t> LabelLocations;
+
+ /// MMI - Machine module info for exception information.
+ MachineModuleInfo* MMI;
+
+ // GVSet - a set to keep track of which globals have been seen
+ SmallPtrSet<const GlobalVariable*, 8> GVSet;
+
+ // CurFn - The llvm function being emitted. Only valid during
+ // finishFunction().
+ const Function *CurFn;
+
+ // CurFnStubUses - For a given Function, a vector of stubs that it
+ // references. This facilitates the JIT detecting that a stub is no
+ // longer used, so that it may be deallocated.
+ DenseMap<const Function *, SmallVector<void*, 1> > CurFnStubUses;
+
+ // StubFnRefs - For a given pointer to a stub, a set of Functions which
+ // reference the stub. When the count of a stub's references drops to zero,
+ // the stub is unused.
+ DenseMap<void *, SmallPtrSet<const Function*, 1> > StubFnRefs;
+
+ // ExtFnStubs - A map of external function names to stubs which have entries
+ // in the JITResolver's ExternalFnToStubMap.
+ StringMap<void *> ExtFnStubs;
+
+ // MCI - A pointer to a MachineCodeInfo object to update with information.
+ MachineCodeInfo *MCI;
+
+ public:
+ JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit), CurFn(0), MCI(0) {
+ MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
+ if (jit.getJITInfo().needsGOT()) {
+ MemMgr->AllocateGOT();
+ DOUT << "JIT is managing a GOT\n";
+ }
+
+ if (ExceptionHandling) DE = new JITDwarfEmitter(jit);
+ }
+ ~JITEmitter() {
+ delete MemMgr;
+ if (ExceptionHandling) delete DE;
+ }
+
+ /// classof - Methods to support type inquiry through isa, cast, and
+ /// dyn_cast:
+ ///
+ static inline bool classof(const JITEmitter*) { return true; }
+ static inline bool classof(const MachineCodeEmitter*) { return true; }
+
+ JITResolver &getJITResolver() { return Resolver; }
+
+ virtual void startFunction(MachineFunction &F);
+ virtual bool finishFunction(MachineFunction &F);
+
+ void emitConstantPool(MachineConstantPool *MCP);
+ void initJumpTableInfo(MachineJumpTableInfo *MJTI);
+ void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
+
+ virtual void startGVStub(const GlobalValue* GV, unsigned StubSize,
+ unsigned Alignment = 1);
+ virtual void startGVStub(const GlobalValue* GV, void *Buffer,
+ unsigned StubSize);
+ virtual void* finishGVStub(const GlobalValue *GV);
+
+ /// allocateSpace - Reserve space in the current block if there is one, or
+ /// allocate a new block of the given size.
+ virtual void *allocateSpace(uintptr_t Size, unsigned Alignment);
+
+ virtual void addRelocation(const MachineRelocation &MR) {
+ Relocations.push_back(MR);
+ }
+
+ virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
+ if (MBBLocations.size() <= (unsigned)MBB->getNumber())
+ MBBLocations.resize((MBB->getNumber()+1)*2);
+ MBBLocations[MBB->getNumber()] = getCurrentPCValue();
+ DOUT << "JIT: Emitting BB" << MBB->getNumber() << " at ["
+ << (void*) getCurrentPCValue() << "]\n";
+ }
+
+ virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
+ virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
+
+ virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
+ assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
+ MBBLocations[MBB->getNumber()] && "MBB not emitted!");
+ return MBBLocations[MBB->getNumber()];
+ }
+
+ /// deallocateMemForFunction - Deallocate all memory for the specified
+ /// function body.
+ void deallocateMemForFunction(Function *F);
+
+ /// AddStubToCurrentFunction - Mark the current function being JIT'd as
+ /// using the stub at the specified address. Allows
+ /// deallocateMemForFunction to also remove stubs no longer referenced.
+ void AddStubToCurrentFunction(void *Stub);
+
+ /// getExternalFnStubs - Accessor for the JIT to find stubs emitted for
+ /// MachineRelocations that reference external functions by name.
+ const StringMap<void*> &getExternalFnStubs() const { return ExtFnStubs; }
+
+ virtual void emitLabel(uint64_t LabelID) {
+ if (LabelLocations.size() <= LabelID)
+ LabelLocations.resize((LabelID+1)*2);
+ LabelLocations[LabelID] = getCurrentPCValue();
+ }
+
+ virtual uintptr_t getLabelAddress(uint64_t LabelID) const {
+ assert(LabelLocations.size() > (unsigned)LabelID &&
+ LabelLocations[LabelID] && "Label not emitted!");
+ return LabelLocations[LabelID];
+ }
+
+ virtual void setModuleInfo(MachineModuleInfo* Info) {
+ MMI = Info;
+ if (ExceptionHandling) DE->setModuleInfo(Info);
+ }
+
+ void setMemoryExecutable(void) {
+ MemMgr->setMemoryExecutable();
+ }
+
+ JITMemoryManager *getMemMgr(void) const { return MemMgr; }
+
+ void setMachineCodeInfo(MachineCodeInfo *mci) {
+ MCI = mci;
+ }
+
+ private:
+ void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
+ void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference,
+ bool NoNeedStub);
+ unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size);
+ unsigned addSizeOfGlobalsInConstantVal(const Constant *C, unsigned Size);
+ unsigned addSizeOfGlobalsInInitializer(const Constant *Init, unsigned Size);
+ unsigned GetSizeOfGlobalsInBytes(MachineFunction &MF);
+ };
+}
+
+void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
+ bool DoesntNeedStub) {
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return TheJIT->getOrEmitGlobalVariable(GV);
+
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));
+
+ // If we have already compiled the function, return a pointer to its body.
+ Function *F = cast<Function>(V);
+ void *ResultPtr;
+ if (!DoesntNeedStub && !TheJIT->isLazyCompilationDisabled()) {
+ // Return the function stub if it's already created.
+ ResultPtr = Resolver.getFunctionStubIfAvailable(F);
+ if (ResultPtr)
+ AddStubToCurrentFunction(ResultPtr);
+ } else {
+ ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
+ }
+ if (ResultPtr) return ResultPtr;
+
+ // If this is an external function pointer, we can force the JIT to
+ // 'compile' it, which really just adds it to the map. In dlsym mode,
+ // external functions are forced through a stub, regardless of reloc type.
+ if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode() &&
+ DoesntNeedStub && !TheJIT->areDlsymStubsEnabled())
+ return TheJIT->getPointerToFunction(F);
+
+ // Okay, the function has not been compiled yet. If the target callback
+ // mechanism is capable of rewriting the instruction directly, prefer to do
+ // that instead of emitting a stub. This uses the lazy resolver, so it is
+ // not legal if lazy compilation is disabled.
+ if (DoesntNeedStub && !TheJIT->isLazyCompilationDisabled())
+ return Resolver.AddCallbackAtLocation(F, Reference);
+
+ // Otherwise, we have to emit a stub.
+ void *StubAddr = Resolver.getFunctionStub(F);
+
+ // Add the stub to the current function's list of referenced stubs, so we can
+ // deallocate them if the current function is ever freed. It's possible to
+ // return null from getFunctionStub in the case of a weak extern that fails
+ // to resolve.
+ if (StubAddr)
+ AddStubToCurrentFunction(StubAddr);
+
+ return StubAddr;
+}
+
+void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference,
+ bool NoNeedStub) {
+ // Make sure GV is emitted first, and create a stub containing the fully
+ // resolved address.
+ void *GVAddress = getPointerToGlobal(V, Reference, true);
+ void *StubAddr = Resolver.getGlobalValueIndirectSym(V, GVAddress);
+
+ // Add the stub to the current function's list of referenced stubs, so we can
+ // deallocate them if the current function is ever freed.
+ AddStubToCurrentFunction(StubAddr);
+
+ return StubAddr;
+}
+
+void JITEmitter::AddStubToCurrentFunction(void *StubAddr) {
+ if (!TheJIT->areDlsymStubsEnabled())
+ return;
+
+ assert(CurFn && "Stub added to current function, but current function is 0!");
+
+ SmallVectorImpl<void*> &StubsUsed = CurFnStubUses[CurFn];
+ StubsUsed.push_back(StubAddr);
+
+ SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[StubAddr];
+ FnRefs.insert(CurFn);
+}
+
+static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
+ const TargetData *TD) {
+ const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
+ if (Constants.empty()) return 0;
+
+ unsigned Size = 0;
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ MachineConstantPoolEntry CPE = Constants[i];
+ unsigned AlignMask = CPE.getAlignment() - 1;
+ Size = (Size + AlignMask) & ~AlignMask;
+ const Type *Ty = CPE.getType();
+ Size += TD->getTypeAllocSize(Ty);
+ }
+ return Size;
+}
+
+static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI) {
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ if (JT.empty()) return 0;
+
+ unsigned NumEntries = 0;
+ for (unsigned i = 0, e = JT.size(); i != e; ++i)
+ NumEntries += JT[i].MBBs.size();
+
+ unsigned EntrySize = MJTI->getEntrySize();
+
+ return NumEntries * EntrySize;
+}
+
+static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {
+ if (Alignment == 0) Alignment = 1;
+ // Since we do not know where the buffer will be allocated, be pessimistic.
+ return Size + Alignment;
+}
+
+/// addSizeOfGlobal - add the size of the global (plus any alignment padding)
+/// into the running total Size.
+
+unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
+ const Type *ElTy = GV->getType()->getElementType();
+ size_t GVSize = (size_t)TheJIT->getTargetData()->getTypeAllocSize(ElTy);
+ size_t GVAlign =
+ (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
+ DOUT << "JIT: Adding in size " << GVSize << " alignment " << GVAlign;
+ DEBUG(GV->dump());
+ // Assume code section ends with worst possible alignment, so first
+ // variable needs maximal padding.
+ if (Size==0)
+ Size = 1;
+ Size = ((Size+GVAlign-1)/GVAlign)*GVAlign;
+ Size += GVSize;
+ return Size;
+}
+
+/// addSizeOfGlobalsInConstantVal - find any globals that we haven't seen yet
+/// but are referenced from the constant; put them in GVSet and add their
+/// size into the running total Size.
+
+unsigned JITEmitter::addSizeOfGlobalsInConstantVal(const Constant *C,
+ unsigned Size) {
+ // If it's undefined, it references no globals; return Size unchanged.
+ if (isa<UndefValue>(C))
+ return Size;
+
+ // If the value is a ConstantExpr, walk its operands.
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ Constant *Op0 = CE->getOperand(0);
+ switch (CE->getOpcode()) {
+ case Instruction::GetElementPtr:
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast: {
+ Size = addSizeOfGlobalsInConstantVal(Op0, Size);
+ break;
+ }
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ Size = addSizeOfGlobalsInConstantVal(Op0, Size);
+ Size = addSizeOfGlobalsInConstantVal(CE->getOperand(1), Size);
+ break;
+ }
+ default: {
+ cerr << "ConstantExpr not handled: " << *CE << "\n";
+ abort();
+ }
+ }
+ }
+
+ if (C->getType()->getTypeID() == Type::PointerTyID)
+ if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C))
+ if (GVSet.insert(GV))
+ Size = addSizeOfGlobal(GV, Size);
+
+ return Size;
+}
+
+/// addSizeOfGlobalsInInitializer - handle any globals that we haven't seen yet
+/// but are referenced from the given initializer.
+
+unsigned JITEmitter::addSizeOfGlobalsInInitializer(const Constant *Init,
+ unsigned Size) {
+ if (!isa<UndefValue>(Init) &&
+ !isa<ConstantVector>(Init) &&
+ !isa<ConstantAggregateZero>(Init) &&
+ !isa<ConstantArray>(Init) &&
+ !isa<ConstantStruct>(Init) &&
+ Init->getType()->isFirstClassType())
+ Size = addSizeOfGlobalsInConstantVal(Init, Size);
+ return Size;
+}
+
+/// GetSizeOfGlobalsInBytes - walk the code for the function, looking for
+/// globals; then walk the initializers of those globals looking for more.
+/// If their size has not been considered yet, add it into the running total
+/// Size.
+
+unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
+ unsigned Size = 0;
+ GVSet.clear();
+
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ MBB != E; ++MBB) {
+ for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ I != E; ++I) {
+ const TargetInstrDesc &Desc = I->getDesc();
+ const MachineInstr &MI = *I;
+ unsigned NumOps = Desc.getNumOperands();
+ for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
+ const MachineOperand &MO = MI.getOperand(CurOp);
+ if (MO.isGlobal()) {
+ GlobalValue* V = MO.getGlobal();
+ const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
+ if (!GV)
+ continue;
+ // If seen in previous function, it will have an entry here.
+ if (TheJIT->getPointerToGlobalIfAvailable(GV))
+ continue;
+ // If seen earlier in this function, it will have an entry here.
+ // FIXME: it should be possible to combine these tables, by
+ // assuming the addresses of the new globals in this module
+ // start at 0 (or something) and adjusting them after codegen
+ // complete. Another possibility is to grab a marker bit in GV.
+ if (GVSet.insert(GV))
+ // A variable as yet unseen. Add in its size.
+ Size = addSizeOfGlobal(GV, Size);
+ }
+ }
+ }
+ }
+ DOUT << "JIT: About to look through initializers\n";
+ // Look for more globals that are referenced only from initializers.
+ // GVSet.end is computed each time because the set can grow as we go.
+ for (SmallPtrSet<const GlobalVariable *, 8>::iterator I = GVSet.begin();
+ I != GVSet.end(); I++) {
+ const GlobalVariable* GV = *I;
+ if (GV->hasInitializer())
+ Size = addSizeOfGlobalsInInitializer(GV->getInitializer(), Size);
+ }
+
+ return Size;
+}
+
+void JITEmitter::startFunction(MachineFunction &F) {
+ DOUT << "JIT: Starting CodeGen of Function "
+ << F.getFunction()->getName() << "\n";
+
+ uintptr_t ActualSize = 0;
+ // Set the memory writable, if it's not already
+ MemMgr->setMemoryWritable();
+ if (MemMgr->NeedsExactSize()) {
+ DOUT << "JIT: ExactSize\n";
+ const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
+ MachineJumpTableInfo *MJTI = F.getJumpTableInfo();
+ MachineConstantPool *MCP = F.getConstantPool();
+
+ // Ensure the constant pool/jump table info is at least 16-byte aligned.
+ ActualSize = RoundUpToAlign(ActualSize, 16);
+
+ // Add the alignment of the constant pool
+ ActualSize = RoundUpToAlign(ActualSize, MCP->getConstantPoolAlignment());
+
+ // Add the constant pool size
+ ActualSize += GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+
+ // Add the alignment of the jump table info.
+ ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment());
+
+ // Add the jump table size
+ ActualSize += GetJumpTableSizeInBytes(MJTI);
+
+ // Add the alignment for the function
+ ActualSize = RoundUpToAlign(ActualSize,
+ std::max(F.getFunction()->getAlignment(), 8U));
+
+ // Add the function size
+ ActualSize += TII->GetFunctionSizeInBytes(F);
+
+ DOUT << "JIT: ActualSize before globals " << ActualSize << "\n";
+ // Add the size of the globals that will be allocated after this function.
+ // These are all the ones referenced from this function that were not
+ // previously allocated.
+ ActualSize += GetSizeOfGlobalsInBytes(F);
+ DOUT << "JIT: ActualSize after globals " << ActualSize << "\n";
+ }
+
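+ // startFunctionBody takes ActualSize by reference and updates it to the
+ // size of the block it actually hands out, so BufferEnd is correct even
+ // when we pass 0 and let the memory manager choose.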
+ BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
+ ActualSize);
+ BufferEnd = BufferBegin+ActualSize;
+
+ // Ensure the constant pool/jump table info is at least 16-byte aligned.
+ emitAlignment(16);
+
+ emitConstantPool(F.getConstantPool());
+ initJumpTableInfo(F.getJumpTableInfo());
+
+ // About to start emitting the machine code for the function.
+ emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
+ TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);
+
+ MBBLocations.clear();
+}
+
+bool JITEmitter::finishFunction(MachineFunction &F) {
+ if (CurBufferPtr == BufferEnd) {
+ // FIXME: Allocate more space, then try again.
+ cerr << "JIT: Ran out of space for generated machine code!\n";
+ abort();
+ }
+
+ emitJumpTableInfo(F.getJumpTableInfo());
+
+ // FnStart is the start of the text, not the start of the constant pool and
+ // other per-function data.
+ uint8_t *FnStart =
+ (uint8_t *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
+
+ // FnEnd is the end of the function's machine code.
+ uint8_t *FnEnd = CurBufferPtr;
+
+ if (!Relocations.empty()) {
+ CurFn = F.getFunction();
+ NumRelos += Relocations.size();
+
+ // Resolve the relocations to concrete pointers.
+ for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
+ MachineRelocation &MR = Relocations[i];
+ void *ResultPtr = 0;
+ if (!MR.letTargetResolve()) {
+ if (MR.isExternalSymbol()) {
+ ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(),
+ false);
+ DOUT << "JIT: Map \'" << MR.getExternalSymbol() << "\' to ["
+ << ResultPtr << "]\n";
+
+ // If the target REALLY wants a stub for this function, emit it now.
+ if (!MR.doesntNeedStub()) {
+ if (!TheJIT->areDlsymStubsEnabled()) {
+ ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
+ } else {
+ void *&Stub = ExtFnStubs[MR.getExternalSymbol()];
+ if (!Stub) {
+ Stub = Resolver.getExternalFunctionStub((void *)&Stub);
+ AddStubToCurrentFunction(Stub);
+ }
+ ResultPtr = Stub;
+ }
+ }
+ } else if (MR.isGlobalValue()) {
+ ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
+ BufferBegin+MR.getMachineCodeOffset(),
+ MR.doesntNeedStub());
+ } else if (MR.isIndirectSymbol()) {
+ ResultPtr = getPointerToGVIndirectSym(MR.getGlobalValue(),
+ BufferBegin+MR.getMachineCodeOffset(),
+ MR.doesntNeedStub());
+ } else if (MR.isBasicBlock()) {
+ ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
+ } else if (MR.isConstantPoolIndex()) {
+ ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
+ } else {
+ assert(MR.isJumpTableIndex());
+ ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
+ }
+
+ MR.setResultPointer(ResultPtr);
+ }
+
+ // If we are managing the GOT and the relocation wants an index,
+ // give it one.
+ if (MR.isGOTRelative() && MemMgr->isManagingGOT()) {
+ unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
+ MR.setGOTIndex(idx);
+ if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
+ DOUT << "JIT: GOT was out of date for " << ResultPtr
+ << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
+ << "\n";
+ ((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
+ }
+ }
+ }
+
+ CurFn = 0;
+ TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
+ Relocations.size(), MemMgr->getGOTBase());
+ }
+
+ // Update the GOT entry for F to point to the new code.
+ if (MemMgr->isManagingGOT()) {
+ unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
+ if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
+ DOUT << "JIT: GOT was out of date for " << (void*)BufferBegin
+ << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n";
+ ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
+ }
+ }
+
+ // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
+ // global variables that were referenced in the relocations.
+ MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
+
+ if (CurBufferPtr == BufferEnd) {
+ // FIXME: Allocate more space, then try again.
+ cerr << "JIT: Ran out of space for generated machine code!\n";
+ abort();
+ }
+
+ BufferBegin = CurBufferPtr = 0;
+ NumBytes += FnEnd-FnStart;
+
+ // Invalidate the icache if necessary.
+ sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);
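+ // On hosts with split instruction/data caches this flushes the freshly
+ // written bytes before execution; on x86 it is effectively a no-op.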
+
+ // Add it to the JIT symbol table if the host wants it.
+ AddFunctionToSymbolTable(F.getFunction()->getNameStart(),
+ FnStart, FnEnd-FnStart);
+
+ DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart
+ << "] Function: " << F.getFunction()->getName()
+ << ": " << (FnEnd-FnStart) << " bytes of text, "
+ << Relocations.size() << " relocations\n";
+
+ if (MCI) {
+ MCI->setAddress(FnStart);
+ MCI->setSize(FnEnd-FnStart);
+ }
+
+ Relocations.clear();
+ ConstPoolAddresses.clear();
+
+ // Mark code region readable and executable if it's not so already.
+ MemMgr->setMemoryExecutable();
+
+#ifndef NDEBUG
+ {
+ if (sys::hasDisassembler()) {
+ DOUT << "JIT: Disassembled code:\n";
+ DOUT << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart);
+ } else {
+ DOUT << "JIT: Binary code:\n";
+ DOUT << std::hex;
+ uint8_t* q = FnStart;
+ for (int i = 0; q < FnEnd; q += 4, ++i) {
+ if (i == 4)
+ i = 0;
+ if (i == 0)
+ DOUT << "JIT: " << std::setw(8) << std::setfill('0')
+ << (long)(q - FnStart) << ": ";
+ bool Done = false;
+ for (int j = 3; j >= 0; --j) {
+ if (q + j >= FnEnd)
+ Done = true;
+ else
+ DOUT << std::setw(2) << std::setfill('0') << (unsigned short)q[j];
+ }
+ if (Done)
+ break;
+ DOUT << ' ';
+ if (i == 3)
+ DOUT << '\n';
+ }
+ DOUT << std::dec;
+ DOUT << '\n';
+ }
+ }
+#endif
+ if (ExceptionHandling) {
+ uintptr_t ActualSize = 0;
+ SavedBufferBegin = BufferBegin;
+ SavedBufferEnd = BufferEnd;
+ SavedCurBufferPtr = CurBufferPtr;
+
+ if (MemMgr->NeedsExactSize()) {
+ ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd);
+ }
+
+ BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(),
+ ActualSize);
+ BufferEnd = BufferBegin+ActualSize;
+ uint8_t* FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd);
+ MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr,
+ FrameRegister);
+ BufferBegin = SavedBufferBegin;
+ BufferEnd = SavedBufferEnd;
+ CurBufferPtr = SavedCurBufferPtr;
+
+ TheJIT->RegisterTable(FrameRegister);
+ }
+
+ if (MMI)
+ MMI->EndFunction();
+
+ return false;
+}
+
+/// deallocateMemForFunction - Deallocate all memory for the specified
+/// function body. Also drop any references the function has to stubs.
+void JITEmitter::deallocateMemForFunction(Function *F) {
+ MemMgr->deallocateMemForFunction(F);
+
+ // If the function did not reference any stubs, return.
+ if (CurFnStubUses.find(F) == CurFnStubUses.end())
+ return;
+
+ // For each referenced stub, erase the reference to this function, and then
+ // erase the list of referenced stubs.
+ SmallVectorImpl<void *> &StubList = CurFnStubUses[F];
+ for (unsigned i = 0, e = StubList.size(); i != e; ++i) {
+ void *Stub = StubList[i];
+
+ // If we already invalidated this stub for this function, continue.
+ if (StubFnRefs.count(Stub) == 0)
+ continue;
+
+ SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[Stub];
+ FnRefs.erase(F);
+
+ // If this function was the last reference to the stub, invalidate the stub
+ // in the JITResolver. Were there a memory manager deallocateStub routine,
+ // we could call that at this point too.
+ if (FnRefs.empty()) {
+ DOUT << "\nJIT: Invalidated Stub at [" << Stub << "]\n";
+ StubFnRefs.erase(Stub);
+
+ // Invalidate the stub. If it is a GV stub, update the JIT's global
+ // mapping for that GV to zero, otherwise, search the string map of
+ // external function names to stubs and remove the entry for this stub.
+ GlobalValue *GV = Resolver.invalidateStub(Stub);
+ if (GV) {
+ TheJIT->updateGlobalMapping(GV, 0);
+ } else {
+ for (StringMapIterator<void*> i = ExtFnStubs.begin(),
+ e = ExtFnStubs.end(); i != e; ++i) {
+ if (i->second == Stub) {
+ ExtFnStubs.erase(i);
+ break;
+ }
+ }
+ }
+ }
+ }
+ CurFnStubUses.erase(F);
+}
+
+
+void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
+ if (BufferBegin)
+ return JITCodeEmitter::allocateSpace(Size, Alignment);
+
+ // Create a new memory block if there is no active one; care must be taken
+ // so that BufferBegin is invalidated when a block is trimmed.
+ BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment);
+ BufferEnd = BufferBegin+Size;
+ return CurBufferPtr;
+}
+
+void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
+ if (TheJIT->getJITInfo().hasCustomConstantPool())
+ return;
+
+ const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
+ if (Constants.empty()) return;
+
+ unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+ unsigned Align = MCP->getConstantPoolAlignment();
+ ConstantPoolBase = allocateSpace(Size, Align);
+ ConstantPool = MCP;
+
+ if (ConstantPoolBase == 0) return; // Buffer overflow.
+
+ DOUT << "JIT: Emitted constant pool at [" << ConstantPoolBase
+ << "] (size: " << Size << ", alignment: " << Align << ")\n";
+
+ // Initialize the memory for all of the constant pool entries.
+ unsigned Offset = 0;
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ MachineConstantPoolEntry CPE = Constants[i];
+ unsigned AlignMask = CPE.getAlignment() - 1;
+ Offset = (Offset + AlignMask) & ~AlignMask;
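+ // This rounds Offset up to the entry's alignment; e.g. an Offset of 5
+ // with a 4-byte-aligned entry becomes 8 (alignments are powers of two,
+ // so the mask arithmetic is exact).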
+
+ uintptr_t CAddr = (uintptr_t)ConstantPoolBase + Offset;
+ ConstPoolAddresses.push_back(CAddr);
+ if (CPE.isMachineConstantPoolEntry()) {
+ // FIXME: add support to lower machine constant pool values into bytes!
+ cerr << "Initialize memory with machine specific constant pool entry"
+ << " has not been implemented!\n";
+ abort();
+ }
+ TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr);
+ DOUT << "JIT: CP" << i << " at [0x"
+ << std::hex << CAddr << std::dec << "]\n";
+
+ const Type *Ty = CPE.Val.ConstVal->getType();
+ Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
+ }
+}
+
+void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
+ if (TheJIT->getJITInfo().hasCustomJumpTables())
+ return;
+
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ if (JT.empty()) return;
+
+ unsigned NumEntries = 0;
+ for (unsigned i = 0, e = JT.size(); i != e; ++i)
+ NumEntries += JT[i].MBBs.size();
+
+ unsigned EntrySize = MJTI->getEntrySize();
+
+ // Just allocate space for all the jump tables now. We will fix up the actual
+ // MBB entries in the tables after we emit the code for each block, since then
+ // we will know the final locations of the MBBs in memory.
+ JumpTable = MJTI;
+ JumpTableBase = allocateSpace(NumEntries * EntrySize, MJTI->getAlignment());
+}
+
+void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
+ if (TheJIT->getJITInfo().hasCustomJumpTables())
+ return;
+
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ if (JT.empty() || JumpTableBase == 0) return;
+
+ if (TargetMachine::getRelocationModel() == Reloc::PIC_) {
+ assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?");
+ // For each jump table, place the offset from the beginning of the table
+ // to the target address.
+ int *SlotPtr = (int*)JumpTableBase;
+
+ for (unsigned i = 0, e = JT.size(); i != e; ++i) {
+ const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
+ // Store the offset of the basic block for this jump table slot in the
+ // memory we allocated for the jump table in 'initJumpTableInfo'
+ uintptr_t Base = (uintptr_t)SlotPtr;
+ for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) {
+ uintptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]);
+ *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base);
+ }
+ }
+ } else {
+ assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?");
+
+ // For each jump table, map each target in the jump table to the address of
+ // an emitted MachineBasicBlock.
+ intptr_t *SlotPtr = (intptr_t*)JumpTableBase;
+
+ for (unsigned i = 0, e = JT.size(); i != e; ++i) {
+ const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
+ // Store the address of the basic block for this jump table slot in the
+ // memory we allocated for the jump table in 'initJumpTableInfo'
+ for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
+ *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
+ }
+ }
+}
+
+void JITEmitter::startGVStub(const GlobalValue* GV, unsigned StubSize,
+ unsigned Alignment) {
+ SavedBufferBegin = BufferBegin;
+ SavedBufferEnd = BufferEnd;
+ SavedCurBufferPtr = CurBufferPtr;
+
+ BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment);
+ BufferEnd = BufferBegin+StubSize+1;
+}
+
+void JITEmitter::startGVStub(const GlobalValue* GV, void *Buffer,
+ unsigned StubSize) {
+ SavedBufferBegin = BufferBegin;
+ SavedBufferEnd = BufferEnd;
+ SavedCurBufferPtr = CurBufferPtr;
+
+ BufferBegin = CurBufferPtr = (uint8_t *)Buffer;
+ BufferEnd = BufferBegin+StubSize+1;
+}
+
+void *JITEmitter::finishGVStub(const GlobalValue* GV) {
+ NumBytes += getCurrentPCOffset();
+ std::swap(SavedBufferBegin, BufferBegin);
+ BufferEnd = SavedBufferEnd;
+ CurBufferPtr = SavedCurBufferPtr;
+ return SavedBufferBegin;
+}
+
+// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
+// in the constant pool that was last emitted with the 'emitConstantPool'
+// method.
+//
+uintptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
+ assert(ConstantNum < ConstantPool->getConstants().size() &&
+ "Invalid ConstantPoolIndex!");
+ return ConstPoolAddresses[ConstantNum];
+}
+
+// getJumpTableEntryAddress - Return the address of the jump table with index
+// 'Index' among the jump tables last initialized with 'initJumpTableInfo'.
+//
+uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
+ const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
+ assert(Index < JT.size() && "Invalid jump table index!");
+
+ unsigned Offset = 0;
+ unsigned EntrySize = JumpTable->getEntrySize();
+
+ for (unsigned i = 0; i < Index; ++i)
+ Offset += JT[i].MBBs.size();
+
+ Offset *= EntrySize;
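+ // For example, if jump table 0 has three 4-byte entries, jump table 1
+ // starts at JumpTableBase + 12.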
+
+ return (uintptr_t)((char *)JumpTableBase + Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// Public interface to this file
+//===----------------------------------------------------------------------===//
+
+JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM) {
+ return new JITEmitter(jit, JMM);
+}
+
+// getPointerToNamedFunction - This function is used as a global wrapper to
+// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
+// bugpoint is debugging the JIT. In that scenario, we are loading an .so and
+// need to resolve function(s) that are being mis-codegenerated, so we need to
+// resolve their addresses at runtime, and this is the way to do it.
+extern "C" {
+ void *getPointerToNamedFunction(const char *Name) {
+ if (Function *F = TheJIT->FindFunctionNamed(Name))
+ return TheJIT->getPointerToFunction(F);
+ return TheJIT->getPointerToNamedFunction(Name);
+ }
+}
+
+// getPointerToFunctionOrStub - If the specified function has been
+// code-gen'd, return a pointer to the function. If not, compile it, or use
+// a stub to implement lazy compilation if available.
+//
+void *JIT::getPointerToFunctionOrStub(Function *F) {
+ // If we have already code generated the function, just return the address.
+ if (void *Addr = getPointerToGlobalIfAvailable(F))
+ return Addr;
+
+ // Get a stub if the target supports it.
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+ return JE->getJITResolver().getFunctionStub(F);
+}
+
+void JIT::registerMachineCodeInfo(MachineCodeInfo *mc) {
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+
+ JE->setMachineCodeInfo(mc);
+}
+
+void JIT::updateFunctionStub(Function *F) {
+ // Get the empty stub we generated earlier.
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+ void *Stub = JE->getJITResolver().getFunctionStub(F);
+
+ // Tell the target jit info to rewrite the stub at the specified address,
+ // rather than creating a new one.
+ void *Addr = getPointerToGlobalIfAvailable(F);
+ getJITInfo().emitFunctionStubAtAddr(F, Addr, Stub, *getCodeEmitter());
+}
+
+/// updateDlsymStubTable - Emit the data necessary to relocate the stubs
+/// that were emitted during code generation.
+///
+void JIT::updateDlsymStubTable() {
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
+
+ SmallVector<GlobalValue*, 8> GVs;
+ SmallVector<void*, 8> Ptrs;
+ const StringMap<void *> &ExtFns = JE->getExternalFnStubs();
+
+ JE->getJITResolver().getRelocatableGVs(GVs, Ptrs);
+
+ unsigned nStubs = GVs.size() + ExtFns.size();
+
+ // If there are no relocatable stubs, return.
+ if (nStubs == 0)
+ return;
+
+ // If there are no new relocatable stubs, return.
+ void *CurTable = JE->getMemMgr()->getDlsymTable();
+ if (CurTable && (*(unsigned *)CurTable == nStubs))
+ return;
+
+ // Calculate the size of the stub info
+ unsigned offset = 4 + 4 * nStubs + sizeof(intptr_t) * nStubs;
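+ // The layout implied by the emission code below is:
+ // [ uint32 nStubs ]
+ // [ uint32 x nStubs string offsets, from the start of the table ]
+ // [ pointer x nStubs stub addresses, low bit 1 == Function ]
+ // [ nStubs null-terminated names, emitted back to back ]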
+
+ SmallVector<unsigned, 8> Offsets;
+ for (unsigned i = 0; i != GVs.size(); ++i) {
+ Offsets.push_back(offset);
+ offset += GVs[i]->getName().length() + 1;
+ }
+ for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end();
+ i != e; ++i) {
+ Offsets.push_back(offset);
+ offset += strlen(i->first()) + 1;
+ }
+
+ // Allocate space for the new "stub", which contains the dlsym table.
+ JE->startGVStub(0, offset, 4);
+
+ // Emit the number of records
+ JE->emitInt32(nStubs);
+
+ // Emit the string offsets
+ for (unsigned i = 0; i != nStubs; ++i)
+ JE->emitInt32(Offsets[i]);
+
+ // Emit the pointers. Verify that they are at least 2-byte aligned, and set
+ // the low bit to 0 == GV, 1 == Function, so that the client code doing the
+ // relocation can write the relocated pointer at the appropriate place in
+ // the stub.
+ for (unsigned i = 0; i != GVs.size(); ++i) {
+ intptr_t Ptr = (intptr_t)Ptrs[i];
+ assert((Ptr & 1) == 0 && "Stub pointers must be at least 2-byte aligned!");
+
+ if (isa<Function>(GVs[i]))
+ Ptr |= (intptr_t)1;
+
+ if (sizeof(Ptr) == 8)
+ JE->emitInt64(Ptr);
+ else
+ JE->emitInt32(Ptr);
+ }
+ for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end();
+ i != e; ++i) {
+ intptr_t Ptr = (intptr_t)i->second | 1;
+
+ if (sizeof(Ptr) == 8)
+ JE->emitInt64(Ptr);
+ else
+ JE->emitInt32(Ptr);
+ }
+
+ // Emit the strings.
+ for (unsigned i = 0; i != GVs.size(); ++i)
+ JE->emitString(GVs[i]->getName());
+ for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end();
+ i != e; ++i)
+ JE->emitString(i->first());
+
+ // Tell the JIT memory manager where it is. The JIT Memory Manager will
+ // deallocate space for the old one, if one existed.
+ JE->getMemMgr()->SetDlsymTable(JE->finishGVStub(0));
+}
+
+/// freeMachineCodeForFunction - Release machine code memory for the given
+/// Function.
+///
+void JIT::freeMachineCodeForFunction(Function *F) {
+ // Delete translation for this from the ExecutionEngine, so it will get
+ // retranslated next time it is used.
+ void *OldPtr = updateGlobalMapping(F, 0);
+
+ if (OldPtr)
+ RemoveFunctionFromSymbolTable(OldPtr);
+
+ // Free the actual memory for the function body and related stuff.
+ assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
+ cast<JITEmitter>(JCE)->deallocateMemForFunction(F);
+}
+
diff --git a/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
new file mode 100644
index 0000000..70ccdcc
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -0,0 +1,541 @@
+//===-- JITMemoryManager.cpp - Memory Allocator for JIT'd code ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DefaultJITMemoryManager class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/GlobalValue.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/System/Memory.h"
+#include <map>
+#include <vector>
+#include <cassert>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+using namespace llvm;
+
+
+JITMemoryManager::~JITMemoryManager() {}
+
+//===----------------------------------------------------------------------===//
+// Memory Block Implementation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// MemoryRangeHeader - For a range of memory, this is the header that we put
+ /// on the block of memory. It is carefully crafted to be one word of memory.
+ /// Allocated blocks have just this header, free'd blocks have FreeRangeHeader
+ /// which starts with this.
+ struct FreeRangeHeader;
+ struct MemoryRangeHeader {
+ /// ThisAllocated - This is true if this block is currently allocated. If
+ /// not, this can be converted to a FreeRangeHeader.
+ unsigned ThisAllocated : 1;
+
+ /// PrevAllocated - Keep track of whether the block immediately before us is
+ /// allocated. If not, the word immediately before this header is the size
+ /// of the previous block.
+ unsigned PrevAllocated : 1;
+
+ /// BlockSize - This is the size in bytes of this memory block,
+ /// including this header.
+ uintptr_t BlockSize : (sizeof(intptr_t)*CHAR_BIT - 2);
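+ // The two flag bits plus BlockSize fill exactly one word (1 + 1 + 62
+ // bits on a 64-bit host), keeping this header to a single word as
+ // promised above.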
+
+
+ /// getBlockAfter - Return the memory block immediately after this one.
+ ///
+ MemoryRangeHeader &getBlockAfter() const {
+ return *(MemoryRangeHeader*)((char*)this+BlockSize);
+ }
+
+ /// getFreeBlockBefore - If the block before this one is free, return it,
+ /// otherwise return null.
+ FreeRangeHeader *getFreeBlockBefore() const {
+ if (PrevAllocated) return 0;
+ intptr_t PrevSize = ((intptr_t *)this)[-1];
+ return (FreeRangeHeader*)((char*)this-PrevSize);
+ }
+
+ /// FreeBlock - Turn an allocated block into a free block, adjusting
+ /// bits in the object headers, and adding an end of region memory block.
+ FreeRangeHeader *FreeBlock(FreeRangeHeader *FreeList);
+
+ /// TrimAllocationToSize - If this allocated block is significantly larger
+ /// than NewSize, split it into two pieces (where the former is NewSize
+ /// bytes, including the header), and add the new block to the free list.
+ FreeRangeHeader *TrimAllocationToSize(FreeRangeHeader *FreeList,
+ uint64_t NewSize);
+ };
+
+ /// FreeRangeHeader - For a memory block that isn't already allocated, this
+ /// keeps track of the current block and has a pointer to the next free block.
+ /// Free blocks are kept on a circularly linked list.
+ struct FreeRangeHeader : public MemoryRangeHeader {
+ FreeRangeHeader *Prev;
+ FreeRangeHeader *Next;
+
+ /// getMinBlockSize - Get the minimum size for a memory block. Blocks
+ /// smaller than this size cannot be created.
+ static unsigned getMinBlockSize() {
+ return sizeof(FreeRangeHeader)+sizeof(intptr_t);
+ }
+
+ /// SetEndOfBlockSizeMarker - The word at the end of every free block is
+ /// known to be the size of the free block. Set it for this block.
+ void SetEndOfBlockSizeMarker() {
+ void *EndOfBlock = (char*)this + BlockSize;
+ ((intptr_t *)EndOfBlock)[-1] = BlockSize;
+ }
+
+ FreeRangeHeader *RemoveFromFreeList() {
+ assert(Next->Prev == this && Prev->Next == this && "Freelist broken!");
+ Next->Prev = Prev;
+ return Prev->Next = Next;
+ }
+
+ void AddToFreeList(FreeRangeHeader *FreeList) {
+ Next = FreeList;
+ Prev = FreeList->Prev;
+ Prev->Next = this;
+ Next->Prev = this;
+ }
+
+ /// GrowBlock - The block after this block just got deallocated. Merge it
+ /// into the current block.
+ void GrowBlock(uintptr_t NewSize);
+
+ /// AllocateBlock - Mark this entire block allocated, updating freelists
+ /// etc. This returns a pointer to the circular free-list.
+ FreeRangeHeader *AllocateBlock();
+ };
+}
+
+
+/// AllocateBlock - Mark this entire block allocated, updating freelists
+/// etc. This returns a pointer to the circular free-list.
+FreeRangeHeader *FreeRangeHeader::AllocateBlock() {
+ assert(!ThisAllocated && !getBlockAfter().PrevAllocated &&
+ "Cannot allocate an allocated block!");
+ // Mark this block allocated.
+ ThisAllocated = 1;
+ getBlockAfter().PrevAllocated = 1;
+
+ // Remove it from the free list.
+ return RemoveFromFreeList();
+}
+
+/// FreeBlock - Turn an allocated block into a free block, adjusting
+/// bits in the object headers, and adding an end of region memory block.
+/// If possible, coalesce this block with neighboring blocks. Return the
+/// FreeRangeHeader to allocate from.
+FreeRangeHeader *MemoryRangeHeader::FreeBlock(FreeRangeHeader *FreeList) {
+ MemoryRangeHeader *FollowingBlock = &getBlockAfter();
+ assert(ThisAllocated && "This block is already allocated!");
+ assert(FollowingBlock->PrevAllocated && "Flags out of sync!");
+
+ FreeRangeHeader *FreeListToReturn = FreeList;
+
+ // If the block after this one is free, merge it into this block.
+ if (!FollowingBlock->ThisAllocated) {
+ FreeRangeHeader &FollowingFreeBlock = *(FreeRangeHeader *)FollowingBlock;
+ // "FreeList" always needs to be a valid free block. If we're about to
+ // coalesce with it, update our notion of what the free list is.
+ if (&FollowingFreeBlock == FreeList) {
+ FreeList = FollowingFreeBlock.Next;
+ FreeListToReturn = 0;
+ assert(&FollowingFreeBlock != FreeList && "No tombstone block?");
+ }
+ FollowingFreeBlock.RemoveFromFreeList();
+
+ // Include the following block into this one.
+ BlockSize += FollowingFreeBlock.BlockSize;
+ FollowingBlock = &FollowingFreeBlock.getBlockAfter();
+
+ // Tell the block after the block we are coalescing that this block is
+ // allocated.
+ FollowingBlock->PrevAllocated = 1;
+ }
+
+ assert(FollowingBlock->ThisAllocated && "Missed coalescing?");
+
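+ // If the block before this one is free, grow it to cover this block (and
+ // anything coalesced above) rather than adding a new entry to the free
+ // list.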
+ if (FreeRangeHeader *PrevFreeBlock = getFreeBlockBefore()) {
+ PrevFreeBlock->GrowBlock(PrevFreeBlock->BlockSize + BlockSize);
+ return FreeListToReturn ? FreeListToReturn : PrevFreeBlock;
+ }
+
+ // Otherwise, mark this block free.
+ FreeRangeHeader &FreeBlock = *(FreeRangeHeader*)this;
+ FollowingBlock->PrevAllocated = 0;
+ FreeBlock.ThisAllocated = 0;
+
+ // Link this into the linked list of free blocks.
+ FreeBlock.AddToFreeList(FreeList);
+
+ // Add a marker at the end of the block, indicating the size of this free
+ // block.
+ FreeBlock.SetEndOfBlockSizeMarker();
+ return FreeListToReturn ? FreeListToReturn : &FreeBlock;
+}
+
+/// GrowBlock - The block after this block just got deallocated. Merge it
+/// into the current block.
+void FreeRangeHeader::GrowBlock(uintptr_t NewSize) {
+ assert(NewSize > BlockSize && "Not growing block?");
+ BlockSize = NewSize;
+ SetEndOfBlockSizeMarker();
+ getBlockAfter().PrevAllocated = 0;
+}
+
+/// TrimAllocationToSize - If this allocated block is significantly larger
+/// than NewSize, split it into two pieces (where the former is NewSize
+/// bytes, including the header), and add the new block to the free list.
+FreeRangeHeader *MemoryRangeHeader::
+TrimAllocationToSize(FreeRangeHeader *FreeList, uint64_t NewSize) {
+ assert(ThisAllocated && getBlockAfter().PrevAllocated &&
+ "Cannot deallocate part of an allocated block!");
+
+ // Don't allow blocks to be trimmed below minimum required size
+ NewSize = std::max<uint64_t>(FreeRangeHeader::getMinBlockSize(), NewSize);
+
+ // Round up size for alignment of header.
+ unsigned HeaderAlign = __alignof(FreeRangeHeader);
+ NewSize = (NewSize + (HeaderAlign-1)) & ~(HeaderAlign-1);
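+ // e.g. with an 8-byte header alignment, a request for 21 bytes rounds up
+ // to 24.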
+
+ // Size is now the size of the block we will remove from the start of the
+ // current block.
+ assert(NewSize <= BlockSize &&
+ "Allocating more space from this block than exists!");
+
+ // If splitting this block will cause the remainder to be too small, do not
+ // split the block.
+ if (BlockSize <= NewSize+FreeRangeHeader::getMinBlockSize())
+ return FreeList;
+
+ // Otherwise, we splice the required number of bytes out of this block, form
+ // a new block immediately after it, then mark this block allocated.
+ MemoryRangeHeader &FormerNextBlock = getBlockAfter();
+
+ // Change the size of this block.
+ BlockSize = NewSize;
+
+ // Get the new block we just sliced out and turn it into a free block.
+ FreeRangeHeader &NewNextBlock = (FreeRangeHeader &)getBlockAfter();
+ NewNextBlock.BlockSize = (char*)&FormerNextBlock - (char*)&NewNextBlock;
+ NewNextBlock.ThisAllocated = 0;
+ NewNextBlock.PrevAllocated = 1;
+ NewNextBlock.SetEndOfBlockSizeMarker();
+ FormerNextBlock.PrevAllocated = 0;
+ NewNextBlock.AddToFreeList(FreeList);
+ return &NewNextBlock;
+}
+
+//===----------------------------------------------------------------------===//
+// Memory Block Implementation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// DefaultJITMemoryManager - Manage memory for the JIT code generation.
+ /// This splits a large block of MAP_NORESERVE'd memory into two
+ /// sections, one for function stubs, one for the functions themselves. We
+ /// have to do this because we may need to emit a function stub while in the
+ /// middle of emitting a function, and we don't know how large the function we
+ /// are emitting is.
+ class VISIBILITY_HIDDEN DefaultJITMemoryManager : public JITMemoryManager {
+ std::vector<sys::MemoryBlock> Blocks; // Memory blocks allocated by the JIT
+ FreeRangeHeader *FreeMemoryList; // Circular list of free blocks.
+
+ // When emitting code into a memory block, this is the block.
+ MemoryRangeHeader *CurBlock;
+
+ uint8_t *CurStubPtr, *StubBase;
+ uint8_t *GOTBase; // Target Specific reserved memory
+ void *DlsymTable; // Stub external symbol information
+
+ // Centralize memory block allocation.
+ sys::MemoryBlock getNewMemoryBlock(unsigned size);
+
+ std::map<const Function*, MemoryRangeHeader*> FunctionBlocks;
+ std::map<const Function*, MemoryRangeHeader*> TableBlocks;
+ public:
+ DefaultJITMemoryManager();
+ ~DefaultJITMemoryManager();
+
+ void AllocateGOT();
+ void SetDlsymTable(void *);
+
+ uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
+ unsigned Alignment);
+
+ /// startFunctionBody - When a function starts, allocate a block of free
+ /// executable memory, returning a pointer to it and its actual size.
+ uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize) {
+
+ FreeRangeHeader* candidateBlock = FreeMemoryList;
+ FreeRangeHeader* head = FreeMemoryList;
+ FreeRangeHeader* iter = head->Next;
+
+ uintptr_t largest = candidateBlock->BlockSize;
+
+ // Search for the largest free block
+ while (iter != head) {
+ if (iter->BlockSize > largest) {
+ largest = iter->BlockSize;
+ candidateBlock = iter;
+ }
+ iter = iter->Next;
+ }
+
+ // Select this candidate block for allocation
+ CurBlock = candidateBlock;
+
+ // Allocate the entire memory block.
+ FreeMemoryList = candidateBlock->AllocateBlock();
+ ActualSize = CurBlock->BlockSize-sizeof(MemoryRangeHeader);
+ return (uint8_t *)(CurBlock+1);
+ }
+
+ /// endFunctionBody - The function F is now allocated, and takes the memory
+ /// in the range [FunctionStart,FunctionEnd).
+ void endFunctionBody(const Function *F, uint8_t *FunctionStart,
+ uint8_t *FunctionEnd) {
+ assert(FunctionEnd > FunctionStart);
+ assert(FunctionStart == (uint8_t *)(CurBlock+1) &&
+ "Mismatched function start/end!");
+
+ uintptr_t BlockSize = FunctionEnd - (uint8_t *)CurBlock;
+ FunctionBlocks[F] = CurBlock;
+
+ // Release the memory at the end of this block that isn't needed.
+ FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
+ }
+
+ /// allocateSpace - Allocate a memory block of the given size.
+ uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
+ CurBlock = FreeMemoryList;
+ FreeMemoryList = FreeMemoryList->AllocateBlock();
+
+ // Skip over the whole MemoryRangeHeader, not just one byte.
+ uint8_t *result = (uint8_t *)(CurBlock + 1);
+
+ if (Alignment == 0) Alignment = 1;
+ result = (uint8_t*)(((intptr_t)result+Alignment-1) &
+ ~(intptr_t)(Alignment-1));
+
+ uintptr_t BlockSize = result + Size - (uint8_t *)CurBlock;
+ FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
+
+ return result;
+ }
+
+ /// startExceptionTable - Use startFunctionBody to allocate memory for the
+ /// function's exception table.
+ uint8_t* startExceptionTable(const Function* F, uintptr_t &ActualSize) {
+ return startFunctionBody(F, ActualSize);
+ }
+
+ /// endExceptionTable - The exception table of F is now allocated,
+ /// and takes the memory in the range [TableStart,TableEnd).
+ void endExceptionTable(const Function *F, uint8_t *TableStart,
+ uint8_t *TableEnd, uint8_t* FrameRegister) {
+ assert(TableEnd > TableStart);
+ assert(TableStart == (uint8_t *)(CurBlock+1) &&
+ "Mismatched table start/end!");
+
+ uintptr_t BlockSize = TableEnd - (uint8_t *)CurBlock;
+ TableBlocks[F] = CurBlock;
+
+ // Release the memory at the end of this block that isn't needed.
+ FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, BlockSize);
+ }
+
+ uint8_t *getGOTBase() const {
+ return GOTBase;
+ }
+
+ void *getDlsymTable() const {
+ return DlsymTable;
+ }
+
+ /// deallocateMemForFunction - Deallocate all memory for the specified
+ /// function body.
+ void deallocateMemForFunction(const Function *F) {
+ std::map<const Function*, MemoryRangeHeader*>::iterator
+ I = FunctionBlocks.find(F);
+ if (I == FunctionBlocks.end()) return;
+
+ // Find the block that is allocated for this function.
+ MemoryRangeHeader *MemRange = I->second;
+ assert(MemRange->ThisAllocated && "Block isn't allocated!");
+
+ // Fill the buffer with garbage!
+#ifndef NDEBUG
+ memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange));
+#endif
+
+ // Free the memory.
+ FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);
+
+ // Finally, remove this entry from FunctionBlocks.
+ FunctionBlocks.erase(I);
+
+ I = TableBlocks.find(F);
+ if (I == TableBlocks.end()) return;
+
+ // Find the block that is allocated for this function.
+ MemRange = I->second;
+ assert(MemRange->ThisAllocated && "Block isn't allocated!");
+
+ // Fill the buffer with garbage!
+#ifndef NDEBUG
+ memset(MemRange+1, 0xCD, MemRange->BlockSize-sizeof(*MemRange));
+#endif
+
+ // Free the memory.
+ FreeMemoryList = MemRange->FreeBlock(FreeMemoryList);
+
+ // Finally, remove this entry from TableBlocks.
+ TableBlocks.erase(I);
+ }
+
+ /// setMemoryWritable - When code generation is in progress,
+ /// the code pages may need permissions changed.
+ void setMemoryWritable(void)
+ {
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
+ sys::Memory::setWritable(Blocks[i]);
+ }
+ /// setMemoryExecutable - When code generation is done and we're ready to
+ /// start execution, the code pages may need permissions changed.
+ void setMemoryExecutable(void)
+ {
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
+ sys::Memory::setExecutable(Blocks[i]);
+ }
+ };
+}
+
+DefaultJITMemoryManager::DefaultJITMemoryManager() {
+ // Allocate a block of memory for functions: 4M on ARM/Darwin, 16M elsewhere.
+#if defined(__APPLE__) && defined(__arm__)
+ sys::MemoryBlock MemBlock = getNewMemoryBlock(4 << 20);
+#else
+ sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);
+#endif
+
+ uint8_t *MemBase = static_cast<uint8_t*>(MemBlock.base());
+
+ // Allocate stubs backwards from the base, allocate functions forward
+ // from the base.
+ StubBase = MemBase;
+ CurStubPtr = MemBase + 512*1024; // Use 512k for stubs, working backwards.
+
+ // We set up the memory chunk with 4 mem regions, like this:
+ // [ START
+ // [ Free #0 ] -> Large space to allocate functions from.
+ // [ Allocated #1 ] -> Tiny space to separate regions.
+ // [ Free #2 ] -> Tiny space so there is always at least 1 free block.
+ // [ Allocated #3 ] -> Tiny space to prevent looking past end of block.
+ // END ]
+ //
+ // The last three blocks are never deallocated or touched.
+
+ // Add MemoryRangeHeader to the end of the memory region, indicating that
+ // the space after the block of memory is allocated. This is block #3.
+ MemoryRangeHeader *Mem3 = (MemoryRangeHeader*)(MemBase+MemBlock.size())-1;
+ Mem3->ThisAllocated = 1;
+ Mem3->PrevAllocated = 0;
+ Mem3->BlockSize = 0;
+
+ /// Add a tiny free region so that the free list always has one entry.
+ FreeRangeHeader *Mem2 =
+ (FreeRangeHeader *)(((char*)Mem3)-FreeRangeHeader::getMinBlockSize());
+ Mem2->ThisAllocated = 0;
+ Mem2->PrevAllocated = 1;
+ Mem2->BlockSize = FreeRangeHeader::getMinBlockSize();
+ Mem2->SetEndOfBlockSizeMarker();
+ Mem2->Prev = Mem2; // Mem2 *is* the free list for now.
+ Mem2->Next = Mem2;
+
+ /// Add a tiny allocated region so that Mem2 is never coalesced away.
+ MemoryRangeHeader *Mem1 = (MemoryRangeHeader*)Mem2-1;
+ Mem1->ThisAllocated = 1;
+ Mem1->PrevAllocated = 0;
+ Mem1->BlockSize = (char*)Mem2 - (char*)Mem1;
+
+ // Add a FreeRangeHeader to the start of the function body region, indicating
+ // that the space is free. Mark the previous block allocated so we never look
+ // at it.
+ FreeRangeHeader *Mem0 = (FreeRangeHeader*)CurStubPtr;
+ Mem0->ThisAllocated = 0;
+ Mem0->PrevAllocated = 1;
+ Mem0->BlockSize = (char*)Mem1-(char*)Mem0;
+ Mem0->SetEndOfBlockSizeMarker();
+ Mem0->AddToFreeList(Mem2);
+
+ // Start out with the freelist pointing to Mem0.
+ FreeMemoryList = Mem0;
+
+ GOTBase = NULL;
+ DlsymTable = NULL;
+}
+
+void DefaultJITMemoryManager::AllocateGOT() {
+ assert(GOTBase == 0 && "Cannot allocate the got multiple times");
+ GOTBase = new uint8_t[sizeof(void*) * 8192];
+ HasGOT = true;
+}
+
+void DefaultJITMemoryManager::SetDlsymTable(void *ptr) {
+ DlsymTable = ptr;
+}
+
+DefaultJITMemoryManager::~DefaultJITMemoryManager() {
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i)
+ sys::Memory::ReleaseRWX(Blocks[i]);
+
+ delete[] GOTBase;
+ Blocks.clear();
+}
+
+uint8_t *DefaultJITMemoryManager::allocateStub(const GlobalValue* F,
+ unsigned StubSize,
+ unsigned Alignment) {
+ CurStubPtr -= StubSize;
+ CurStubPtr = (uint8_t*)(((intptr_t)CurStubPtr) &
+ ~(intptr_t)(Alignment-1));
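+ // The masking rounds CurStubPtr *down* to the requested alignment, which
+ // is correct here because stubs are allocated from the top of the stub
+ // region downward toward StubBase.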
+ if (CurStubPtr < StubBase) {
+ // FIXME: allocate a new block
+ fprintf(stderr, "JIT ran out of memory for function stubs!\n");
+ abort();
+ }
+ return CurStubPtr;
+}
+
+sys::MemoryBlock DefaultJITMemoryManager::getNewMemoryBlock(unsigned size) {
+ // Allocate a new block close to the last one.
+ const sys::MemoryBlock *BOld = Blocks.empty() ? 0 : &Blocks.front();
+ std::string ErrMsg;
+ sys::MemoryBlock B = sys::Memory::AllocateRWX(size, BOld, &ErrMsg);
+ if (B.base() == 0) {
+ fprintf(stderr,
+ "Allocation failed when allocating new memory in the JIT\n%s\n",
+ ErrMsg.c_str());
+ abort();
+ }
+ Blocks.push_back(B);
+ return B;
+}
+
+
+JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
+ return new DefaultJITMemoryManager();
+}
diff --git a/lib/ExecutionEngine/JIT/Makefile b/lib/ExecutionEngine/JIT/Makefile
new file mode 100644
index 0000000..e2c9c61
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/Makefile
@@ -0,0 +1,37 @@
+##===- lib/ExecutionEngine/JIT/Makefile --------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../..
+LIBRARYNAME = LLVMJIT
+
+# Get the $(ARCH) setting
+include $(LEVEL)/Makefile.config
+
+# Enable the X86 JIT if compiling on X86
+ifeq ($(ARCH), x86)
+ ENABLE_X86_JIT = 1
+endif
+
+# This flag can also be used on the command line to force inclusion
+# of the X86 JIT on non-X86 hosts
+ifdef ENABLE_X86_JIT
+ CPPFLAGS += -DENABLE_X86_JIT
+endif
+
+# Enable the Sparc JIT if compiling on Sparc
+ifeq ($(ARCH), Sparc)
+ ENABLE_SPARC_JIT = 1
+endif
+
+# This flag can also be used on the command line to force inclusion
+# of the Sparc JIT on non-Sparc hosts
+ifdef ENABLE_SPARC_JIT
+ CPPFLAGS += -DENABLE_SPARC_JIT
+endif
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/ExecutionEngine/JIT/TargetSelect.cpp b/lib/ExecutionEngine/JIT/TargetSelect.cpp
new file mode 100644
index 0000000..0f20819
--- /dev/null
+++ b/lib/ExecutionEngine/JIT/TargetSelect.cpp
@@ -0,0 +1,83 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetMachineRegistry for the appropriate JIT to use, and
+// allows the user to specify a specific one on the commandline with -march=x.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JIT.h"
+#include "llvm/Module.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/Support/RegistryParser.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetMachineRegistry.h"
+using namespace llvm;
+
+static cl::opt<const TargetMachineRegistry::entry*, false,
+ RegistryParser<TargetMachine> >
+MArch("march", cl::desc("Architecture to generate assembly for:"));
+
+static cl::opt<std::string>
+MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+static cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+/// createJIT - Create and return a new JIT compiler if there is one
+/// available for the current target. Otherwise, return null.
+///
+ExecutionEngine *JIT::createJIT(ModuleProvider *MP, std::string *ErrorStr,
+ JITMemoryManager *JMM,
+ CodeGenOpt::Level OptLevel) {
+ const TargetMachineRegistry::entry *TheArch = MArch;
+ if (TheArch == 0) {
+ std::string Error;
+ TheArch = TargetMachineRegistry::getClosestTargetForJIT(Error);
+ if (TheArch == 0) {
+ if (ErrorStr)
+ *ErrorStr = Error;
+ return 0;
+ }
+ } else if (TheArch->JITMatchQualityFn() == 0) {
+ cerr << "WARNING: This target JIT is not designed for the host you are"
+ << " running. If bad things happen, please choose a different "
+ << "-march switch.\n";
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (!MCPU.empty() || !MAttrs.empty()) {
+ SubtargetFeatures Features;
+ Features.setCPU(MCPU);
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ // Allocate a target...
+ TargetMachine *Target = TheArch->CtorFn(*MP->getModule(), FeaturesStr);
+ assert(Target && "Could not allocate target machine!");
+
+ // If the target supports JIT code generation, return a new JIT now.
+ if (TargetJITInfo *TJ = Target->getJITInfo())
+ return new JIT(MP, *Target, *TJ, JMM, OptLevel);
+
+ if (ErrorStr)
+ *ErrorStr = "target does not support JIT code generation";
+ return 0;
+}