From 9f28abd980752efcf77578cd494f1015083c2a2b Mon Sep 17 00:00:00 2001
From: marcel
Date: Mon, 7 Jul 2014 00:27:09 +0000
Subject: Remove ia64.

This includes:
o   All directories named *ia64*
o   All files named *ia64*
o   All ia64-specific code guarded by __ia64__
o   All ia64-specific makefile logic
o   Mention of ia64 in comments and documentation

This excludes:
o   Everything under contrib/
o   Everything under crypto/
o   sys/xen/interface
o   sys/sys/elf_common.h

Discussed at: BSDcan
---
 libexec/rtld-elf/ia64/Makefile.inc   |   1 -
 libexec/rtld-elf/ia64/reloc.c        | 658 -----------------------------------------------------------------
 libexec/rtld-elf/ia64/rtld_machdep.h |  74 ----
 libexec/rtld-elf/ia64/rtld_start.S   | 252 --------------
 libexec/rtld-elf/rtld.c              |  15 +-
 5 files changed, 3 insertions(+), 997 deletions(-)
 delete mode 100644 libexec/rtld-elf/ia64/Makefile.inc
 delete mode 100644 libexec/rtld-elf/ia64/reloc.c
 delete mode 100644 libexec/rtld-elf/ia64/rtld_machdep.h
 delete mode 100644 libexec/rtld-elf/ia64/rtld_start.S

(limited to 'libexec')

diff --git a/libexec/rtld-elf/ia64/Makefile.inc b/libexec/rtld-elf/ia64/Makefile.inc
deleted file mode 100644
index e8c0da7..0000000
--- a/libexec/rtld-elf/ia64/Makefile.inc
+++ /dev/null
@@ -1 +0,0 @@
-# $FreeBSD$
diff --git a/libexec/rtld-elf/ia64/reloc.c b/libexec/rtld-elf/ia64/reloc.c
deleted file mode 100644
index 1a41cb3..0000000
--- a/libexec/rtld-elf/ia64/reloc.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*-
- * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-/*
- * Dynamic linker for ELF.
- *
- * John Polstra .
- */
-
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "debug.h"
-#include "rtld.h"
-
-extern Elf_Dyn _DYNAMIC;
-
-/*
- * Macros for loading/storing unaligned 64-bit values. These are
- * needed because relocations can point to unaligned data. This
- * occurs in the DWARF2 exception frame tables generated by the
- * compiler, for instance.
- *
- * We don't use these when relocating jump slots and GOT entries,
- * since they are guaranteed to be aligned.
- *
- * XXX dfr stub for now.
- */
-#define load64(p)      (*(u_int64_t *) (p))
-#define store64(p, v)  (*(u_int64_t *) (p) = (v))
-
-/* Allocate an @fptr.
*/ - -#define FPTR_CHUNK_SIZE 64 - -struct fptr_chunk { - struct fptr fptrs[FPTR_CHUNK_SIZE]; -}; - -static struct fptr_chunk first_chunk; -static struct fptr_chunk *current_chunk = &first_chunk; -static struct fptr *next_fptr = &first_chunk.fptrs[0]; -static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE]; - -/* - * We use static storage initially so that we don't have to call - * malloc during init_rtld(). - */ -static struct fptr * -alloc_fptr(Elf_Addr target, Elf_Addr gp) -{ - struct fptr* fptr; - - if (next_fptr == last_fptr) { - current_chunk = xmalloc(sizeof(struct fptr_chunk)); - next_fptr = ¤t_chunk->fptrs[0]; - last_fptr = ¤t_chunk->fptrs[FPTR_CHUNK_SIZE]; - } - fptr = next_fptr; - next_fptr++; - fptr->target = target; - fptr->gp = gp; - return fptr; -} - -static struct fptr ** -alloc_fptrs(Obj_Entry *obj, bool mapped) -{ - struct fptr **fptrs; - size_t fbytes; - - fbytes = obj->dynsymcount * sizeof(struct fptr *); - - /* - * Avoid malloc, if requested. Happens when relocating - * rtld itself on startup. - */ - if (mapped) { - fptrs = mmap(NULL, fbytes, PROT_READ|PROT_WRITE, - MAP_ANON, -1, 0); - if (fptrs == MAP_FAILED) - fptrs = NULL; - } else { - fptrs = xcalloc(1, fbytes); - } - - /* - * This assertion is necessary to guarantee function pointer - * uniqueness - */ - assert(fptrs != NULL); - - return (obj->priv = fptrs); -} - -static void -free_fptrs(Obj_Entry *obj, bool mapped) -{ - struct fptr **fptrs; - size_t fbytes; - - fptrs = obj->priv; - if (fptrs == NULL) - return; - - fbytes = obj->dynsymcount * sizeof(struct fptr *); - if (mapped) - munmap(fptrs, fbytes); - else - free(fptrs); - obj->priv = NULL; -} - -/* Relocate a non-PLT object with addend. */ -static int -reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela, - SymCache *cache, int flags, RtldLockState *lockstate) -{ - struct fptr **fptrs; - Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset); - - switch (ELF_R_TYPE(rela->r_info)) { - case R_IA_64_REL64LSB: - /* - * We handle rtld's relocations in rtld_start.S - */ - if (obj != obj_rtld) - store64(where, - load64(where) + (Elf_Addr) obj->relocbase); - break; - - case R_IA_64_DIR64LSB: { - const Elf_Sym *def; - const Obj_Entry *defobj; - Elf_Addr target; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - return -1; - - target = (def->st_shndx != SHN_UNDEF) - ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0; - store64(where, target + rela->r_addend); - break; - } - - case R_IA_64_FPTR64LSB: { - /* - * We have to make sure that all @fptr references to - * the same function are identical so that code can - * compare function pointers. - */ - const Elf_Sym *def; - const Obj_Entry *defobj; - struct fptr *fptr = 0; - Elf_Addr target, gp; - int sym_index; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - SYMLOOK_IN_PLT | flags, cache, lockstate); - if (def == NULL) { - /* - * XXX r_debug_state is problematic and find_symdef() - * returns NULL for it. This probably has something to - * do with symbol versioning (r_debug_state is in the - * symbol map). If we return -1 in that case we abort - * relocating rtld, which typically is fatal. So, for - * now just skip the symbol when we're relocating - * rtld. We don't care about r_debug_state unless we - * are being debugged. 
- */ - if (obj != obj_rtld) - return -1; - break; - } - - if (def->st_shndx != SHN_UNDEF) { - target = (Elf_Addr)(defobj->relocbase + def->st_value); - gp = (Elf_Addr)defobj->pltgot; - - /* rtld is allowed to reference itself only */ - assert(!obj->rtld || obj == defobj); - fptrs = defobj->priv; - if (fptrs == NULL) - fptrs = alloc_fptrs((Obj_Entry *) defobj, - obj->rtld); - - sym_index = def - defobj->symtab; - - /* - * Find the @fptr, using fptrs as a helper. - */ - if (fptrs) - fptr = fptrs[sym_index]; - if (!fptr) { - fptr = alloc_fptr(target, gp); - if (fptrs) - fptrs[sym_index] = fptr; - } - } else - fptr = NULL; - - store64(where, (Elf_Addr)fptr); - break; - } - - case R_IA_64_IPLTLSB: { - /* - * Relocation typically used to populate C++ virtual function - * tables. It creates a 128-bit function descriptor at the - * specified memory address. - */ - const Elf_Sym *def; - const Obj_Entry *defobj; - struct fptr *fptr; - Elf_Addr target, gp; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - return -1; - - if (def->st_shndx != SHN_UNDEF) { - target = (Elf_Addr)(defobj->relocbase + def->st_value); - gp = (Elf_Addr)defobj->pltgot; - } else { - target = 0; - gp = 0; - } - - fptr = (void*)where; - store64(&fptr->target, target); - store64(&fptr->gp, gp); - break; - } - - case R_IA_64_DTPMOD64LSB: { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - return -1; - - store64(where, defobj->tlsindex); - break; - } - - case R_IA_64_DTPREL64LSB: { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - return -1; - - store64(where, def->st_value + rela->r_addend); - break; - } - - case R_IA_64_TPREL64LSB: { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - return -1; - - /* - * We lazily allocate offsets for static TLS as we - * see the first relocation that references the - * TLS block. This allows us to support (small - * amounts of) static TLS in dynamically loaded - * modules. If we run out of space, we generate an - * error. - */ - if (!defobj->tls_done) { - if (!allocate_tls_offset((Obj_Entry*) defobj)) { - _rtld_error("%s: No space available for static " - "Thread Local Storage", obj->path); - return -1; - } - } - - store64(where, defobj->tlsoffset + def->st_value + rela->r_addend); - break; - } - - case R_IA_64_NONE: - break; - - default: - _rtld_error("%s: Unsupported relocation type %u" - " in non-PLT relocations\n", obj->path, - (unsigned int)ELF_R_TYPE(rela->r_info)); - return -1; - } - - return(0); -} - -/* Process the non-PLT relocations. */ -int -reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, - RtldLockState *lockstate) -{ - const Elf_Rel *rellim; - const Elf_Rel *rel; - const Elf_Rela *relalim; - const Elf_Rela *rela; - SymCache *cache; - int bytes = obj->dynsymcount * sizeof(SymCache); - int r = -1; - - /* - * The dynamic loader may be called from a thread, we have - * limited amounts of stack available so we cannot use alloca(). 
- */ - cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0); - if (cache == MAP_FAILED) - cache = NULL; - - /* Perform relocations without addend if there are any: */ - rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); - for (rel = obj->rel; obj->rel != NULL && rel < rellim; rel++) { - Elf_Rela locrela; - - locrela.r_info = rel->r_info; - locrela.r_offset = rel->r_offset; - locrela.r_addend = 0; - if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, flags, - lockstate)) - goto done; - } - - /* Perform relocations with addend if there are any: */ - relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize); - for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) { - if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, flags, - lockstate)) - goto done; - } - - r = 0; -done: - if (cache) - munmap(cache, bytes); - - /* - * Release temporarily mapped fptrs if relocating - * rtld object itself. A new table will be created - * in make_function_pointer using malloc when needed. - */ - if (obj->rtld && obj->priv) - free_fptrs(obj, true); - - return (r); -} - -/* Process the PLT relocations. */ -int -reloc_plt(Obj_Entry *obj) -{ - /* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */ - if (obj->pltrelsize != 0) { - const Elf_Rel *rellim; - const Elf_Rel *rel; - - rellim = (const Elf_Rel *) - ((char *)obj->pltrel + obj->pltrelsize); - for (rel = obj->pltrel; rel < rellim; rel++) { - Elf_Addr *where; - - assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB); - - /* Relocate the @fptr pointing into the PLT. */ - where = (Elf_Addr *)(obj->relocbase + rel->r_offset); - *where += (Elf_Addr)obj->relocbase; - } - } else { - const Elf_Rela *relalim; - const Elf_Rela *rela; - - relalim = (const Elf_Rela *) - ((char *)obj->pltrela + obj->pltrelasize); - for (rela = obj->pltrela; rela < relalim; rela++) { - Elf_Addr *where; - - assert(ELF_R_TYPE(rela->r_info) == R_IA_64_IPLTLSB); - - /* Relocate the @fptr pointing into the PLT. */ - where = (Elf_Addr *)(obj->relocbase + rela->r_offset); - *where += (Elf_Addr)obj->relocbase; - } - } - return 0; -} - -int -reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate) -{ - - /* XXX not implemented */ - return (0); -} - -int -reloc_gnu_ifunc(Obj_Entry *obj, int flags, - struct Struct_RtldLockState *lockstate) -{ - - /* XXX not implemented */ - return (0); -} - -/* Relocate the jump slots in an object. */ -int -reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate) -{ - if (obj->jmpslots_done) - return 0; - /* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. 
*/ - if (obj->pltrelsize != 0) { - const Elf_Rel *rellim; - const Elf_Rel *rel; - - rellim = (const Elf_Rel *) - ((char *)obj->pltrel + obj->pltrelsize); - for (rel = obj->pltrel; rel < rellim; rel++) { - Elf_Addr *where; - const Elf_Sym *def; - const Obj_Entry *defobj; - - assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB); - where = (Elf_Addr *)(obj->relocbase + rel->r_offset); - def = find_symdef(ELF_R_SYM(rel->r_info), obj, - &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate); - if (def == NULL) - return -1; - reloc_jmpslot(where, - (Elf_Addr)(defobj->relocbase - + def->st_value), - defobj, obj, rel); - } - } else { - const Elf_Rela *relalim; - const Elf_Rela *rela; - - relalim = (const Elf_Rela *) - ((char *)obj->pltrela + obj->pltrelasize); - for (rela = obj->pltrela; rela < relalim; rela++) { - Elf_Addr *where; - const Elf_Sym *def; - const Obj_Entry *defobj; - - where = (Elf_Addr *)(obj->relocbase + rela->r_offset); - def = find_symdef(ELF_R_SYM(rela->r_info), obj, - &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate); - if (def == NULL) - return -1; - reloc_jmpslot(where, - (Elf_Addr)(defobj->relocbase - + def->st_value), - defobj, obj, (Elf_Rel *)rela); - } - } - obj->jmpslots_done = true; - return 0; -} - -/* Fixup the jump slot at "where" to transfer control to "target". */ -Elf_Addr -reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj, - const Obj_Entry *refobj, const Elf_Rel *rel) -{ - Elf_Addr stubaddr; - - dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p", - (void *)where, (void *)target, (void *)obj->pltgot); - stubaddr = *where; - if (stubaddr != target) { - - /* - * Point this @fptr directly at the target. Update the - * gp value first so that we don't break another cpu - * which is currently executing the PLT entry. - */ - where[1] = (Elf_Addr) obj->pltgot; - ia64_mf(); - where[0] = target; - ia64_mf(); - } - - /* - * The caller needs an @fptr for the adjusted entry. The PLT - * entry serves this purpose nicely. - */ - return (Elf_Addr) where; -} - -/* - * XXX ia64 doesn't seem to have copy relocations. - * - * Returns 0 on success, -1 on failure. - */ -int -do_copy_relocations(Obj_Entry *dstobj) -{ - - return 0; -} - -/* - * Return the @fptr representing a given function symbol. - */ -void * -make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj) -{ - struct fptr **fptrs = obj->priv; - int index = sym - obj->symtab; - - if (!fptrs) { - /* - * This should only happen for something like - * dlsym("dlopen"). Actually, I'm not sure it can ever - * happen. - */ - fptrs = alloc_fptrs((Obj_Entry *) obj, false); - } - if (!fptrs[index]) { - Elf_Addr target, gp; - target = (Elf_Addr) (obj->relocbase + sym->st_value); - gp = (Elf_Addr) obj->pltgot; - fptrs[index] = alloc_fptr(target, gp); - } - return fptrs[index]; -} - -void -call_initfini_pointer(const Obj_Entry *obj, Elf_Addr target) -{ - struct fptr fptr; - - fptr.gp = (Elf_Addr) obj->pltgot; - fptr.target = target; - dbg(" initfini: target=%p, gp=%p", - (void *) fptr.target, (void *) fptr.gp); - ((InitFunc) &fptr)(); -} - -void -call_init_pointer(const Obj_Entry *obj, Elf_Addr target) -{ - struct fptr fptr; - - fptr.gp = (Elf_Addr) obj->pltgot; - fptr.target = target; - dbg(" initfini: target=%p, gp=%p", - (void *) fptr.target, (void *) fptr.gp); - ((InitArrFunc) &fptr)(main_argc, main_argv, environ); -} - -/* Initialize the special PLT entries. 
*/ -void -init_pltgot(Obj_Entry *obj) -{ - const Elf_Dyn *dynp; - Elf_Addr *pltres = 0; - - /* - * When there are no PLT relocations, the DT_IA_64_PLT_RESERVE entry - * is bogus. Do not setup the BOR pointers in that case. An example - * of where this happens is /usr/lib/libxpg4.so.3. - */ - if (obj->pltrelasize == 0 && obj->pltrelsize == 0) - return; - - /* - * Find the PLT RESERVE section. - */ - for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) { - if (dynp->d_tag == DT_IA_64_PLT_RESERVE) - pltres = (u_int64_t *) - (obj->relocbase + dynp->d_un.d_ptr); - } - if (!pltres) - errx(1, "Can't find DT_IA_64_PLT_RESERVE entry"); - - /* - * The PLT RESERVE section is used to get values to pass to - * _rtld_bind when lazy binding. - */ - pltres[0] = (Elf_Addr) obj; - pltres[1] = FPTR_TARGET(_rtld_bind_start); - pltres[2] = FPTR_GP(_rtld_bind_start); -} - -void -allocate_initial_tls(Obj_Entry *list) -{ - void *tpval; - - /* - * Fix the size of the static TLS block by using the maximum - * offset allocated so far and adding a bit for dynamic modules to - * use. - */ - tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA; - - tpval = allocate_tls(list, NULL, TLS_TCB_SIZE, 16); - __asm __volatile("mov r13 = %0" :: "r"(tpval)); -} - -void *__tls_get_addr(unsigned long module, unsigned long offset) -{ - register Elf_Addr** tp __asm__("r13"); - - return tls_get_addr_common(tp, module, offset); -} diff --git a/libexec/rtld-elf/ia64/rtld_machdep.h b/libexec/rtld-elf/ia64/rtld_machdep.h deleted file mode 100644 index b00a90fc..0000000 --- a/libexec/rtld-elf/ia64/rtld_machdep.h +++ /dev/null @@ -1,74 +0,0 @@ -/*- - * Copyright (c) 1999, 2000 John D. Polstra. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#ifndef RTLD_MACHDEP_H -#define RTLD_MACHDEP_H 1 - -#include -#include - -/* - * Macros for cracking ia64 function pointers. - */ -struct fptr { - Elf_Addr target; - Elf_Addr gp; -}; - -#define FPTR_TARGET(f) (((struct fptr *) (f))->target) -#define FPTR_GP(f) (((struct fptr *) (f))->gp) - -/* Return the address of the .dynamic section in the dynamic linker. 
*/ -#define rtld_dynamic(obj) (&_DYNAMIC) - -struct Struct_Obj_Entry; - -Elf_Addr reloc_jmpslot(Elf_Addr *, Elf_Addr, const struct Struct_Obj_Entry *, - const struct Struct_Obj_Entry *, const Elf_Rel *); -void *make_function_pointer(const Elf_Sym *, const struct Struct_Obj_Entry *); -void call_initfini_pointer(const struct Struct_Obj_Entry *, Elf_Addr); -void call_init_pointer(const struct Struct_Obj_Entry *, Elf_Addr); - -#define TLS_TCB_SIZE 16 - -#define round(size, align) \ - (((size) + (align) - 1) & ~((align) - 1)) -#define calculate_first_tls_offset(size, align) \ - round(TLS_TCB_SIZE, align) -#define calculate_tls_offset(prev_offset, prev_size, size, align) \ - round(prev_offset + prev_size, align) -#define calculate_tls_end(off, size) ((off) + (size)) - -extern void *__tls_get_addr(unsigned long module, unsigned long offset); - -#define RTLD_DEFAULT_STACK_PF_EXEC 0 -#define RTLD_DEFAULT_STACK_EXEC 0 - -#define RTLD_INIT_PAGESIZES_EARLY 1 - -#endif diff --git a/libexec/rtld-elf/ia64/rtld_start.S b/libexec/rtld-elf/ia64/rtld_start.S deleted file mode 100644 index f41e5e5..0000000 --- a/libexec/rtld-elf/ia64/rtld_start.S +++ /dev/null @@ -1,252 +0,0 @@ -/* $FreeBSD$ */ -/* From: NetBSD: rtld_start.S,v 1.1 1996/12/16 20:38:09 cgd Exp */ - -/* - * Copyright 1996 Matt Thomas - * Copyright 2000 John D. Polstra - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include - -ENTRY(_rtld_start, 0) - alloc r2=ar.pfs,3,0,3,0 - ;; -1: mov r14=ip // calculate gp - addl r3=@gprel(1b),r0 - ;; - sub gp=r14,r3 - ;; - .section .sdata -2: data4 @ltv(1b) // unrelocated address of 1b - .align 8 - .previous - add r15=@gprel(2b),gp - ;; - ld8 r15=[r15] - ;; - sub out0=r14,r15 // out0 is image base address - br.call.sptk.many rp=_rtld_reloc // fixup image - - add sp=-16,sp // 16 bytes for us, 16 for _rtld - ;; - mov out0=in0 - add out1=16,sp // address for exit proc - add out2=24,sp // address for obj_main - - br.call.sptk.many rp=_rtld // r8=_rtld(sp, &exit_proc, &obj_main) - - add r16=16,sp // address for exit proc - ;; - ld8 r15=[r16] // read exit proc - add sp=16,sp // readjust stack - mov b7=r8 // address of real _start - ;; - alloc r2=ar.pfs,0,0,3,0 // dump register frame - mov out2=r15 - ;; - br.call.sptk.many rp=b7 // transfer to main program - br.call.sptk.many rp=exit // die -END(_rtld_start) - -/* - * _rtld_bind_start: lookup a lazy binding and transfer to real target - * - * Arguments: - * r1 gp value for rtld - * r15 Index in plt - * r16 Obj_Entry of caller - * in0-in7 Arguments for target procedure - * rp Return address back to caller - */ -ENTRY(_rtld_bind_start, 0) -{ .mii - alloc loc0=ar.pfs,8,6,3,0 // space to save r8-r11 - add r17=16-8*16,sp // leave 16 bytes for _rtld_bind - add r18=32-8*16,sp - ;; -} { .mii - mov loc2=r8 // structure return address - add sp=-8*16,sp // space to save f8-f15 - mov loc1=rp - ;; -} { .mii - stf.spill [r17]=f8,32 // save float arguments - mov loc3=r9 // language specific - mov loc4=r10 // language specific -} { .mii - stf.spill [r18]=f9,32 - mov loc5=r11 // language specific - shl out1=r15,4 // 16 * index - ;; -} { .mmi - stf.spill [r17]=f10,32 - stf.spill [r18]=f11,32 - mov out0=r16 // Obj_Entry for caller - ;; -} { .mmi - stf.spill [r17]=f12,32 - stf.spill [r18]=f13,32 - shladd out1=r15,3,out1 // rela offset = 24 * index - ;; -} { .mmb - stf.spill [r17]=f14,32 - stf.spill [r18]=f15,32 - br.call.sptk.many rp=_rtld_bind -} { .mii - ld8 r14=[r8],8 // target address - add r17=16,sp - add r18=32,sp - ;; -} { .mii - ld8 r1=[r8] // target gp - mov ar.pfs=loc0 // clean up - mov rp=loc1 -} { .mmi - ldf.fill f8=[r17],32 // restore float arguments - ldf.fill f9=[r18],32 - mov r8=loc2 // restore structure pointer - ;; -} { .mmi - ldf.fill f10=[r17],32 - ldf.fill f11=[r18],32 - mov r9=loc3 - ;; -} { .mmi - ldf.fill f12=[r17],32 - ldf.fill f13=[r18],32 - mov r10=loc4 - ;; -} { .mmi - ldf.fill f14=[r17],32 - ldf.fill f15=[r18],32 - mov r11=loc5 - ;; -} { .mii - nop.m 0 - mov b7=r14 - add sp=8*16,sp - ;; -} { .mib - alloc r14=ar.pfs,0,0,8,0 // drop our register frame - nop.i 0 - br.sptk.many b7 // jump to target -} -END(_rtld_bind_start) - -#define DT_NULL 0 /* Terminating entry. */ -#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */ -#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. */ -#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */ - -#define R_IA_64_NONE 0 /* None */ -#define R_IA_64_DIR64LSB 0x27 /* word64 LSB S + A */ -#define R_IA_64_REL64LSB 0x6f /* word64 LSB BD + A */ - -/* - * _rtld_reloc: relocate the rtld image, apart from @fptrs. - * - * Assumes that rtld was linked at zero and that we only need to - * handle REL64LSB and DIR64LSB relocations. - * - * Arguments: - * r1 gp value for rtld - * in0 rtld base address - */ -STATIC_ENTRY(_rtld_reloc, 1) - alloc loc0=ar.pfs,1,2,0,0 - mov loc1=rp - ;; - movl r15=@gprel(_DYNAMIC) // find _DYNAMIC etc. 
-	;;
-	add	r15=r15,gp		// relocate _DYNAMIC etc.
-	;;
-1:	ld8	r16=[r15],8		// read r15->d_tag
-	;;
-	ld8	r17=[r15],8		// and r15->d_val
-	;;
-	cmp.eq	p6,p0=DT_NULL,r16	// done?
-(p6)	br.cond.dpnt.few 2f
-	;;
-	cmp.eq	p6,p0=DT_RELA,r16
-	;;
-(p6)	add	r18=r17,in0		// found rela section
-	;;
-	cmp.eq	p6,p0=DT_RELASZ,r16
-	;;
-(p6)	mov	r19=r17			// found rela size
-	;;
-	cmp.eq	p6,p0=DT_RELAENT,r16
-	;;
-(p6)	mov	r22=r17			// found rela entry size
-	;;
-	br.sptk.few 1b
-
-2:
-	ld8	r15=[r18],8		// read r_offset
-	;;
-	ld8	r16=[r18],8		// read r_info
-	add	r15=r15,in0		// relocate r_offset
-	;;
-	ld8	r17=[r18],8		// read r_addend
-	sub	r19=r19,r22		// update relasz
-
-	extr.u	r23=r16,0,32		// ELF64_R_TYPE(r16)
-	;;
-	cmp.eq	p6,p0=R_IA_64_NONE,r23
-(p6)	br.cond.dpnt.few 3f
-	;;
-	cmp.eq	p6,p0=R_IA_64_DIR64LSB,r23
-	;;
-(p6)	br.cond.dptk.few 4f
-	;;
-	cmp.eq	p6,p0=R_IA_64_REL64LSB,r23
-	;;
-(p6)	br.cond.dptk.few 4f
-	;;
-
-3:	cmp.ltu	p6,p0=0,r19		// more?
-(p6)	br.cond.dptk.few 2b		// loop
-
-	mov	r8=0			// success return value
-	;;
-	br.cond.sptk.few 9f		// done
-
-4:
-	ld8	r16=[r15]		// read value
-	;;
-	add	r16=r16,in0		// relocate it
-	;;
-	st8	[r15]=r16		// and store it back
-	br.cond.sptk.few 3b
-
-9:
-	mov	ar.pfs=loc0
-	mov	rp=loc1
-	;;
-	br.ret.sptk.few rp
-
-END(_rtld_reloc)
diff --git a/libexec/rtld-elf/rtld.c b/libexec/rtld-elf/rtld.c
index 0c53a16..03c92d0 100644
--- a/libexec/rtld-elf/rtld.c
+++ b/libexec/rtld-elf/rtld.c
@@ -3084,9 +3084,7 @@ do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
     const Elf_Sym *def;
     SymLook req;
     RtldLockState lockstate;
-#ifndef __ia64__
     tls_index ti;
-#endif
     int res;
 
     def = NULL;
@@ -3191,24 +3189,17 @@ do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
 
     /*
      * The value required by the caller is derived from the value
-     * of the symbol. For the ia64 architecture, we need to
-     * construct a function descriptor which the caller can use to
-     * call the function with the right 'gp' value. For other
-     * architectures and for non-functions, the value is simply
-     * the relocated value of the symbol.
+     * of the symbol. this is simply the relocated value of the
+     * symbol.
      */
     if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
         return (make_function_pointer(def, defobj));
     else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
         return (rtld_resolve_ifunc(defobj, def));
     else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
-#ifdef __ia64__
-        return (__tls_get_addr(defobj->tlsindex, def->st_value));
-#else
         ti.ti_module = defobj->tlsindex;
         ti.ti_offset = def->st_value;
         return (__tls_get_addr(&ti));
-#endif
     } else
         return (defobj->relocbase + def->st_value);
 }
@@ -4341,7 +4332,7 @@ tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
 	return (tls_get_addr_slow(dtvp, index, offset));
 }
 
-#if defined(__arm__) || defined(__ia64__) || defined(__mips__) || defined(__powerpc__)
+#if defined(__arm__) || defined(__mips__) || defined(__powerpc__)
 
 /*
  * Allocate Static TLS using the Variant I method.
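
For readers without an ia64 background, the removed reloc.c revolves around "@fptr" function descriptors: on ia64 a C function pointer is the address of a (target, gp) pair rather than the code address itself, so rtld has to hand out exactly one descriptor per function or pointer comparisons across objects break. The following is a minimal sketch of that canonicalization in portable C with hypothetical names, not the rtld API; alloc_fptrs() in the removed file backs the per-symbol cache with mmap() while rtld relocates itself, precisely so this path needs no malloc().

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the ia64 "@fptr" function descriptor. */
    struct fptr {
        uint64_t target;    /* code address of the function */
        uint64_t gp;        /* global pointer of the defining object */
    };

    #define FPTR_CHUNK_SIZE 64

    static struct fptr chunk[FPTR_CHUNK_SIZE];  /* static first chunk, no malloc */
    static size_t used;

    static struct fptr *
    alloc_fptr(uint64_t target, uint64_t gp)
    {
        struct fptr *fp;

        assert(used < FPTR_CHUNK_SIZE);         /* the real code chains new chunks */
        fp = &chunk[used++];
        fp->target = target;
        fp->gp = gp;
        return (fp);
    }

    /*
     * One cache slot per dynamic symbol: every reference to the same symbol
     * gets the same descriptor, which keeps function-pointer equality working.
     */
    static struct fptr *
    get_fptr(struct fptr **cache, size_t symidx, uint64_t target, uint64_t gp)
    {
        if (cache[symidx] == NULL)
            cache[symidx] = alloc_fptr(target, gp);
        return (cache[symidx]);
    }

    int
    main(void)
    {
        struct fptr *cache[8] = { 0 };
        struct fptr *a = get_fptr(cache, 3, 0x1000, 0x2000);
        struct fptr *b = get_fptr(cache, 3, 0x1000, 0x2000);

        printf("same descriptor: %d\n", a == b);    /* prints 1 */
        return (0);
    }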
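
The load64()/store64() macros near the top of the removed reloc.c are flagged "XXX dfr stub for now": they cast and dereference directly even though the surrounding comment explains that relocations can point at unaligned data (DWARF2 exception frame tables, for instance). A safer portable rendering of that stated intent, offered only as a sketch, goes through memcpy(), which compilers collapse to a single load or store when the pointer is in fact aligned:

    #include <stdint.h>
    #include <string.h>

    /* Read and write 64-bit values that may be misaligned. */
    static inline uint64_t
    load64(const void *p)
    {
        uint64_t v;

        memcpy(&v, p, sizeof(v));
        return (v);
    }

    static inline void
    store64(void *p, uint64_t v)
    {
        memcpy(p, &v, sizeof(v));
    }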
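
reloc_non_plt() sizes a temporary SymCache by the object's dynamic symbol count and obtains it from mmap() rather than alloca(), because rtld may be entered on a thread with very little stack; it tolerates MAP_FAILED by simply running without the cache. A small sketch of that pattern with a hypothetical helper name (MAP_PRIVATE added for portability; the FreeBSD code passes only MAP_ANON):

    #include <sys/mman.h>

    #include <stddef.h>

    /* Per-call scratch sized by the dynamic symbol count; NULL is tolerated. */
    static void *
    alloc_scratch(size_t nsyms, size_t entsize)
    {
        void *p;

        p = mmap(NULL, nsyms * entsize, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        return (p == MAP_FAILED ? NULL : p);
    }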
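
reloc_jmpslot() updates a descriptor that another CPU may be calling through at that very moment, so it writes the new gp, issues ia64_mf() (a memory fence), and only then writes the new target. The same ordering discipline expressed with C11 atomics, purely as an illustration of the idea rather than the removed code, which uses plain stores separated by ia64_mf():

    #include <stdatomic.h>
    #include <stdint.h>

    struct fptr {
        _Atomic uint64_t target;
        _Atomic uint64_t gp;
    };

    /* Retarget a descriptor that other threads may be executing through. */
    static void
    fptr_retarget(struct fptr *fp, uint64_t new_target, uint64_t new_gp)
    {
        /* Publish the gp first, then fence, then the target. */
        atomic_store_explicit(&fp->gp, new_gp, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&fp->target, new_target, memory_order_relaxed);
    }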
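
The removed rtld_machdep.h lays static TLS out with Variant I: the 16-byte TCB sits at the thread pointer and each module's block follows at the next suitably aligned offset, which is all that calculate_first_tls_offset() and calculate_tls_offset() compute. A worked example with made-up module sizes and alignments (not taken from the commit):

    #include <stddef.h>
    #include <stdio.h>

    #define TLS_TCB_SIZE    16
    #define roundup2(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    int
    main(void)
    {
        /* Hypothetical module TLS sizes/alignments. */
        size_t off1 = roundup2(TLS_TCB_SIZE, 8);    /* first module  */
        size_t off2 = roundup2(off1 + 24, 16);      /* second module */

        /* Prints "module 1 at tp+16, module 2 at tp+48". */
        printf("module 1 at tp+%zu, module 2 at tp+%zu\n", off1, off2);
        return (0);
    }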
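
_rtld_reloc at the end of the removed rtld_start.S is what lets rtld bootstrap itself: before any of its own relocations have been applied, it walks _DYNAMIC for DT_RELA/DT_RELASZ/DT_RELAENT and adds the load base to every DIR64LSB and REL64LSB word. It must be assembly for exactly that reason, but the loop reads more easily in C; the following is only a readability aid with simplified, hypothetical types:

    #include <stdint.h>

    #define DT_NULL             0
    #define DT_RELA             7
    #define DT_RELASZ           8
    #define DT_RELAENT          9
    #define R_IA_64_NONE        0
    #define R_IA_64_DIR64LSB    0x27
    #define R_IA_64_REL64LSB    0x6f

    typedef struct { uint64_t d_tag, d_val; } Dyn;
    typedef struct { uint64_t r_offset, r_info, r_addend; } Rela;

    /* Apply the image's own base relocations; "base" is the load address. */
    static void
    self_reloc(const Dyn *dynamic, uint64_t base)
    {
        const Rela *rela = NULL;
        uint64_t relasz = 0, relaent = sizeof(Rela);
        const Dyn *d;

        for (d = dynamic; d->d_tag != DT_NULL; d++) {
            if (d->d_tag == DT_RELA)
                rela = (const Rela *)(base + d->d_val);
            else if (d->d_tag == DT_RELASZ)
                relasz = d->d_val;
            else if (d->d_tag == DT_RELAENT)
                relaent = d->d_val;
        }
        for (; relasz >= relaent; relasz -= relaent, rela++) {
            uint32_t type = (uint32_t)rela->r_info;     /* ELF64_R_TYPE */

            if (type == R_IA_64_DIR64LSB || type == R_IA_64_REL64LSB)
                *(uint64_t *)(base + rela->r_offset) += base;
            /* R_IA_64_NONE and everything else is left alone here. */
        }
    }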
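
The only file this commit modifies rather than deletes is rtld.c: in do_dlsym() the ia64 branch returned __tls_get_addr(defobj->tlsindex, def->st_value) directly, while the surviving path builds a tls_index and passes its address, the form every remaining architecture uses. In outline, assuming the usual two-field tls_index layout; __tls_get_addr() is only declared here, since the real definition lives in the dynamic linker:

    #include <stddef.h>

    typedef struct {
        unsigned long ti_module;    /* module index assigned at load time */
        unsigned long ti_offset;    /* offset of the symbol in that block */
    } tls_index;

    void *__tls_get_addr(tls_index *ti);    /* provided by the dynamic linker */

    /* Sketch of how do_dlsym() now resolves an STT_TLS symbol for its caller. */
    static void *
    resolve_tls_symbol(unsigned long obj_tlsindex, unsigned long st_value)
    {
        tls_index ti;

        ti.ti_module = obj_tlsindex;
        ti.ti_offset = st_value;
        return (__tls_get_addr(&ti));
    }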