Diffstat (limited to 'lib/Support/Unix')
-rw-r--r--  lib/Support/Unix/Memory.inc  | 206
-rw-r--r--  lib/Support/Unix/Path.inc    |  17
-rw-r--r--  lib/Support/Unix/Signals.inc |  38
3 files changed, 232 insertions, 29 deletions
diff --git a/lib/Support/Unix/Memory.inc b/lib/Support/Unix/Memory.inc
index 5a57a28..9a8abd2 100644
--- a/lib/Support/Unix/Memory.inc
+++ b/lib/Support/Unix/Memory.inc
@@ -13,6 +13,7 @@
 #include "Unix.h"
 #include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Process.h"
 
 #ifdef HAVE_SYS_MMAN_H
@@ -23,14 +24,146 @@
 #include <mach/mach.h>
 #endif
 
+#if defined(__mips__)
+# if defined(__OpenBSD__)
+#  include <mips64/sysarch.h>
+# else
+#  include <sys/cachectl.h>
+# endif
+#endif
+
+extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
+
+namespace {
+
+int getPosixProtectionFlags(unsigned Flags) {
+  switch (Flags) {
+  case llvm::sys::Memory::MF_READ:
+    return PROT_READ;
+  case llvm::sys::Memory::MF_WRITE:
+    return PROT_WRITE;
+  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
+    return PROT_READ | PROT_WRITE;
+  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
+    return PROT_READ | PROT_EXEC;
+  case llvm::sys::Memory::MF_READ |
+       llvm::sys::Memory::MF_WRITE |
+       llvm::sys::Memory::MF_EXEC:
+    return PROT_READ | PROT_WRITE | PROT_EXEC;
+  case llvm::sys::Memory::MF_EXEC:
+    return PROT_EXEC;
+  default:
+    llvm_unreachable("Illegal memory protection flag specified!");
+  }
+  // Provide a default return value as required by some compilers.
+  return PROT_NONE;
+}
+
+} // namespace
+
+namespace llvm {
+namespace sys {
+
+MemoryBlock
+Memory::allocateMappedMemory(size_t NumBytes,
+                             const MemoryBlock *const NearBlock,
+                             unsigned PFlags,
+                             error_code &EC) {
+  EC = error_code::success();
+  if (NumBytes == 0)
+    return MemoryBlock();
+
+  static const size_t PageSize = Process::GetPageSize();
+  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
+
+  int fd = -1;
+#ifdef NEED_DEV_ZERO_FOR_MMAP
+  static int zero_fd = open("/dev/zero", O_RDWR);
+  if (zero_fd == -1) {
+    EC = error_code(errno, system_category());
+    return MemoryBlock();
+  }
+  fd = zero_fd;
+#endif
+
+  int MMFlags = MAP_PRIVATE |
+#ifdef HAVE_MMAP_ANONYMOUS
+  MAP_ANONYMOUS
+#else
+  MAP_ANON
+#endif
+  ; // Ends statement above
+
+  int Protect = getPosixProtectionFlags(PFlags);
+
+  // Use any near hint and the page size to set a page-aligned starting address
+  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
+                                NearBlock->size() : 0;
+  if (Start && Start % PageSize)
+    Start += PageSize - Start % PageSize;
+
+  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
+                      Protect, MMFlags, fd, 0);
+  if (Addr == MAP_FAILED) {
+    if (NearBlock) //Try again without a near hint
+      return allocateMappedMemory(NumBytes, 0, PFlags, EC);
+
+    EC = error_code(errno, system_category());
+    return MemoryBlock();
+  }
+
+  MemoryBlock Result;
+  Result.Address = Addr;
+  Result.Size = NumPages*PageSize;
+
+  if (PFlags & MF_EXEC)
+    Memory::InvalidateInstructionCache(Result.Address, Result.Size);
+
+  return Result;
+}
+
+error_code
+Memory::releaseMappedMemory(MemoryBlock &M) {
+  if (M.Address == 0 || M.Size == 0)
+    return error_code::success();
+
+  if (0 != ::munmap(M.Address, M.Size))
+    return error_code(errno, system_category());
+
+  M.Address = 0;
+  M.Size = 0;
+
+  return error_code::success();
+}
+
+error_code
+Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
+  if (M.Address == 0 || M.Size == 0)
+    return error_code::success();
+
+  if (!Flags)
+    return error_code(EINVAL, generic_category());
+
+  int Protect = getPosixProtectionFlags(Flags);
+
+  int Result = ::mprotect(M.Address, M.Size, Protect);
+  if (Result != 0)
+    return error_code(errno, system_category());
+
+  if (Flags & MF_EXEC)
+    Memory::InvalidateInstructionCache(M.Address, M.Size);
+
+  return error_code::success();
+}
+
 /// AllocateRWX - Allocate a slab of memory with read/write/execute
 /// permissions. This is typically used for JIT applications where we want
 /// to emit code to the memory then jump to it. Getting this type of memory
 /// is very OS specific.
 ///
-llvm::sys::MemoryBlock
-llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
-                               std::string *ErrMsg) {
+MemoryBlock
+Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
+                    std::string *ErrMsg) {
   if (NumBytes == 0) return MemoryBlock();
 
   size_t pageSize = Process::GetPageSize();
@@ -78,7 +211,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                                 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
   if (KERN_SUCCESS != kr) {
     MakeErrMsg(ErrMsg, "vm_protect max RX failed");
-    return sys::MemoryBlock();
+    return MemoryBlock();
   }
 
   kr = vm_protect(mach_task_self(), (vm_address_t)pa,
@@ -86,7 +219,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                   VM_PROT_READ | VM_PROT_WRITE);
   if (KERN_SUCCESS != kr) {
     MakeErrMsg(ErrMsg, "vm_protect RW failed");
-    return sys::MemoryBlock();
+    return MemoryBlock();
   }
 #endif
 
@@ -97,17 +230,17 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
   return result;
 }
 
-bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
   if (M.Address == 0 || M.Size == 0) return false;
   if (0 != ::munmap(M.Address, M.Size))
     return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
   return false;
 }
 
-bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
 #if defined(__APPLE__) && defined(__arm__)
   if (M.Address == 0 || M.Size == 0) return false;
-  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+  Memory::InvalidateInstructionCache(M.Address, M.Size);
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
     (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
   return KERN_SUCCESS == kr;
@@ -116,10 +249,10 @@ bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
 #endif
 }
 
-bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
+bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
 #if defined(__APPLE__) && defined(__arm__)
   if (M.Address == 0 || M.Size == 0) return false;
-  sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
+  Memory::InvalidateInstructionCache(M.Address, M.Size);
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
     (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
   return KERN_SUCCESS == kr;
@@ -128,7 +261,7 @@ bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
 #endif
 }
 
-bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
+bool Memory::setRangeWritable(const void *Addr, size_t Size) {
 #if defined(__APPLE__) && defined(__arm__)
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                 (vm_size_t)Size, 0,
@@ -139,7 +272,7 @@ bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
 #endif
 }
 
-bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
+bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
 #if defined(__APPLE__) && defined(__arm__)
   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                 (vm_size_t)Size, 0,
@@ -149,3 +282,52 @@ bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
   return true;
 #endif
 }
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void Memory::InvalidateInstructionCache(const void *Addr,
+                                        size_t Len) {
+
+// icache invalidation for PPC and ARM.
+#if defined(__APPLE__)
+
+# if (defined(__POWERPC__) || defined (__ppc__) || \
+      defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
+  sys_icache_invalidate(const_cast<void *>(Addr), Len);
+# endif
+
+#else
+
+# if (defined(__POWERPC__) || defined (__ppc__) || \
+      defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
+  const size_t LineSize = 32;
+
+  const intptr_t Mask = ~(LineSize - 1);
+  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
+  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
+
+  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+    asm volatile("dcbf 0, %0" : : "r"(Line));
+  asm volatile("sync");
+
+  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+    asm volatile("icbi 0, %0" : : "r"(Line));
+  asm volatile("isync");
+# elif defined(__arm__) && defined(__GNUC__)
+  // FIXME: Can we safely always call this for __GNUC__ everywhere?
+  const char *Start = static_cast<const char *>(Addr);
+  const char *End = Start + Len;
+  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
+# elif defined(__mips__)
+  const char *Start = static_cast<const char *>(Addr);
+  cacheflush(const_cast<char *>(Start), Len, BCACHE);
+# endif
+
+#endif // end apple
+
+  ValgrindDiscardTranslations(Addr, Len);
+}
+
+} // namespace sys
+} // namespace llvm
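[Editorial note, not part of the patch: a minimal sketch of how a JIT client might drive the new allocateMappedMemory/protectMappedMemory/releaseMappedMemory API introduced above, assuming the llvm/Support/Memory.h and llvm/Support/system_error.h headers of this era; the helper name emitAndRun and the W^X-style flag switching are illustrative only.]

    // Sketch: allocate writable pages, emit code into them, then flip them to
    // read+execute. protectMappedMemory() invalidates the instruction cache
    // itself whenever MF_EXEC is requested, so no extra flush is needed here.
    #include "llvm/Support/Memory.h"
    #include "llvm/Support/system_error.h"

    using namespace llvm;

    static bool emitAndRun(size_t CodeSize) {
      error_code EC;
      sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
          CodeSize, /*NearBlock=*/0,
          sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
      if (EC)
        return false;

      // ... copy the generated instructions into MB.base() ...

      EC = sys::Memory::protectMappedMemory(
          MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
      if (EC)
        return false;

      // ... jump into the freshly protected code ...

      return sys::Memory::releaseMappedMemory(MB) == error_code::success();
    }
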
diff --git a/lib/Support/Unix/Path.inc b/lib/Support/Unix/Path.inc
index 6bddbdf..6a5ebb8 100644
--- a/lib/Support/Unix/Path.inc
+++ b/lib/Support/Unix/Path.inc
@@ -261,7 +261,8 @@ Path::GetCurrentDirectory() {
 }
 
 #if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \
-    defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__)
+    defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) || \
+    defined(__linux__) || defined(__CYGWIN__)
 static int
 test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
          const char *dir, const char *bin)
@@ -337,9 +338,17 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
     return Path(exe_path);
 #elif defined(__linux__) || defined(__CYGWIN__)
   char exe_path[MAXPATHLEN];
-  ssize_t len = readlink("/proc/self/exe", exe_path, sizeof(exe_path));
-  if (len >= 0)
-    return Path(StringRef(exe_path, len));
+  StringRef aPath("/proc/self/exe");
+  if (sys::fs::exists(aPath)) {
+    // /proc is not always mounted under Linux (chroot for example).
+    ssize_t len = readlink(aPath.str().c_str(), exe_path, sizeof(exe_path));
+    if (len >= 0)
+      return Path(StringRef(exe_path, len));
+  } else {
+    // Fall back to the classical detection.
+    if (getprogpath(exe_path, argv0) != NULL)
+      return Path(exe_path);
+  }
 #elif defined(HAVE_DLFCN_H)
   // Use dladdr to get executable path if available.
   Dl_info DLInfo;
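[Editorial note, not part of the patch: the GetMainExecutable change prefers /proc/self/exe when /proc is available and otherwise falls back to the PATH-based getprogpath() lookup. A standalone sketch of the readlink half; the helper name getSelfExePath is hypothetical and the fallback is omitted.]

    // Sketch: resolve the running executable via /proc/self/exe.
    // readlink() does not null-terminate and silently truncates, so the
    // returned length must be used explicitly, just as the patch does with
    // StringRef(exe_path, len).
    #include <string>
    #include <unistd.h>

    static std::string getSelfExePath() {
      char Buf[4096];
      ssize_t Len = ::readlink("/proc/self/exe", Buf, sizeof(Buf));
      if (Len < 0)
        return std::string(); // /proc not mounted (e.g. chroot) or other error.
      return std::string(Buf, static_cast<size_t>(Len));
    }
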
diff --git a/lib/Support/Unix/Signals.inc b/lib/Support/Unix/Signals.inc
index 5195116..9e94068 100644
--- a/lib/Support/Unix/Signals.inc
+++ b/lib/Support/Unix/Signals.inc
@@ -121,17 +121,29 @@ static void UnregisterHandlers() {
 /// NB: This must be an async signal safe function. It cannot allocate or free
 /// memory, even in debug builds.
 static void RemoveFilesToRemove() {
-  // Note: avoid iterators in case of debug iterators that allocate or release
+  // We avoid iterators in case of debug iterators that allocate or release
   // memory.
   for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i) {
-    // Note that we don't want to use any external code here, and we don't care
-    // about errors. We're going to try as hard as we can as often as we need
-    // to to make these files go away. If these aren't files, too bad.
-    //
-    // We do however rely on a std::string implementation for which repeated
-    // calls to 'c_str()' don't allocate memory. We pre-call 'c_str()' on all
-    // of these strings to try to ensure this is safe.
-    unlink(FilesToRemove[i].c_str());
+    // We rely on a std::string implementation for which repeated calls to
+    // 'c_str()' don't allocate memory. We pre-call 'c_str()' on all of these
+    // strings to try to ensure this is safe.
+    const char *path = FilesToRemove[i].c_str();
+
+    // Get the status so we can determine if it's a file or directory. If we
+    // can't stat the file, ignore it.
+    struct stat buf;
+    if (stat(path, &buf) != 0)
+      continue;
+
+    // If this is not a regular file, ignore it. We want to prevent removal of
+    // special files like /dev/null, even if the compiler is being run with the
+    // super-user permissions.
+    if (!S_ISREG(buf.st_mode))
+      continue;
+
+    // Otherwise, remove the file. We ignore any errors here as there is nothing
+    // else we can do.
+    unlink(path);
   }
 }
 
@@ -243,7 +255,7 @@ void llvm::sys::AddSignalHandler(void (*FnPtr)(void *), void *Cookie) {
 // On glibc systems we have the 'backtrace' function, which works nicely, but
 // doesn't demangle symbols.
 static void PrintStackTrace(void *) {
-#ifdef HAVE_BACKTRACE
+#if defined(HAVE_BACKTRACE) && defined(ENABLE_BACKTRACES)
   static void* StackTrace[256];
   // Use backtrace() to output a backtrace on Linux systems with glibc.
   int depth = backtrace(StackTrace,
@@ -293,7 +305,7 @@ static void PrintStackTrace(void *) {
 #endif
 }
 
-/// PrintStackTraceOnErrorSignal - When an error signal (such as SIBABRT or
+/// PrintStackTraceOnErrorSignal - When an error signal (such as SIGABRT or
 /// SIGSEGV) is delivered to the process, print a stack trace and then exit.
 void llvm::sys::PrintStackTraceOnErrorSignal() {
   AddSignalHandler(PrintStackTrace, 0);
@@ -305,10 +317,10 @@ void llvm::sys::PrintStackTraceOnErrorSignal() {
 
   exception_mask_t mask = EXC_MASK_CRASH;
 
-  kern_return_t ret = task_set_exception_ports(self,
+  kern_return_t ret = task_set_exception_ports(self,
                                                mask,
                                                MACH_PORT_NULL,
-                                               EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
+                                               EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
                                                THREAD_STATE_NONE);
   (void)ret;
 }
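[Editorial note, not part of the patch: the RemoveFilesToRemove() change amounts to the guard below, which stays async-signal-safe by using only stat(), S_ISREG and unlink(). A standalone sketch with a hypothetical helper name.]

    // Sketch: remove a path only if it is a regular file, so that special
    // files such as /dev/null survive even when the process runs with
    // super-user privileges. Errors from unlink() are deliberately ignored.
    #include <sys/stat.h>
    #include <unistd.h>

    static void removeIfRegularFile(const char *Path) {
      struct stat Buf;
      if (::stat(Path, &Buf) != 0) // Cannot stat it: leave it alone.
        return;
      if (!S_ISREG(Buf.st_mode))   // Not a regular file: leave it alone.
        return;
      ::unlink(Path);
    }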