author     dfr <dfr@FreeBSD.org>  2001-10-15 18:48:42 +0000
committer  dfr <dfr@FreeBSD.org>  2001-10-15 18:48:42 +0000
commit     7d69aa453630c63bba640d287d0781db518b5a53 (patch)
tree       58745c0bcc233d2e6a7a5aae303d27432c4dd053 /libexec/rtld-elf/ia64
parent     8d45c0568791bbcc1147afa6e857e7b307c43c11 (diff)
Add ia64 support. Various adjustments were made to existing targets to
cope with a few interface changes required by the ia64. In particular,
function pointers on ia64 need special treatment in rtld.
Diffstat (limited to 'libexec/rtld-elf/ia64')
-rw-r--r--  libexec/rtld-elf/ia64/Makefile.inc   |   2
-rw-r--r--  libexec/rtld-elf/ia64/lockdflt.c     | 181
-rw-r--r--  libexec/rtld-elf/ia64/reloc.c        | 435
-rw-r--r--  libexec/rtld-elf/ia64/rtld_machdep.h |  57
-rw-r--r--  libexec/rtld-elf/ia64/rtld_start.S   | 306
5 files changed, 981 insertions, 0 deletions
diff --git a/libexec/rtld-elf/ia64/Makefile.inc b/libexec/rtld-elf/ia64/Makefile.inc
new file mode 100644
index 0000000..3ea0ac3
--- /dev/null
+++ b/libexec/rtld-elf/ia64/Makefile.inc
@@ -0,0 +1,2 @@
+# $FreeBSD$
+LDFLAGS+= -Wl,--export-dynamic
diff --git a/libexec/rtld-elf/ia64/lockdflt.c b/libexec/rtld-elf/ia64/lockdflt.c
new file mode 100644
index 0000000..5847abb
--- /dev/null
+++ b/libexec/rtld-elf/ia64/lockdflt.c
@@ -0,0 +1,181 @@
+/*-
+ * Copyright 1999, 2000 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Thread locking implementation for the dynamic linker.
+ *
+ * We use the "simple, non-scalable reader-preference lock" from:
+ *
+ * J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
+ * Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
+ * Principles and Practice of Parallel Programming, April 1991.
+ *
+ * In this algorithm the lock is a single word. Its low-order bit is
+ * set when a writer holds the lock. The remaining high-order bits
+ * contain a count of readers desiring the lock. The algorithm requires
+ * atomic "compare_and_store" and "add" operations, which we implement
+ * using assembly language sequences in "rtld_start.S".
+ *
+ * These are spinlocks. When spinning we call nanosleep() for 1
+ * microsecond each time around the loop. This will most likely yield
+ * the CPU to other threads (including, we hope, the lockholder) allowing
+ * them to make some progress.
+ */
+
+#include <signal.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include "debug.h"
+#include "rtld.h"
+
+/*
+ * This value of CACHE_LINE_SIZE is conservative. The actual size
+ * is 32 on the 21064, 21064A, 21066, 21066A, and 21164. It is 64
+ * on the 21264. Compaq recommends sequestering each lock in its own
+ * 128-byte block to allow for future implementations with larger
+ * cache lines.
+ */
+#define CACHE_LINE_SIZE 128
+
+#define WAFLAG 0x1 /* A writer holds the lock */
+#define RC_INCR 0x2 /* Adjusts count of readers desiring lock */
+
+typedef struct Struct_Lock {
+ volatile int lock;
+ void *base;
+} Lock;
+
+static const struct timespec usec = { 0, 1000 }; /* 1 usec. */
+static sigset_t fullsigmask, oldsigmask;
+
+static void *
+lock_create(void *context)
+{
+ void *base;
+ char *p;
+ uintptr_t r;
+ Lock *l;
+
+ /*
+ * Arrange for the lock to occupy its own cache line. First, we
+ * optimistically allocate just a cache line, hoping that malloc
+ * will give us a well-aligned block of memory. If that doesn't
+ * work, we allocate a larger block and take a well-aligned cache
+ * line from it.
+ */
+ base = xmalloc(CACHE_LINE_SIZE);
+ p = (char *)base;
+ if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
+ free(base);
+ base = xmalloc(2 * CACHE_LINE_SIZE);
+ p = (char *)base;
+ if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
+ p += CACHE_LINE_SIZE - r;
+ }
+ l = (Lock *)p;
+ l->base = base;
+ l->lock = 0;
+ return l;
+}
+
+static void
+lock_destroy(void *lock)
+{
+ Lock *l = (Lock *)lock;
+
+ free(l->base);
+}
+
+static void
+rlock_acquire(void *lock)
+{
+ Lock *l = (Lock *)lock;
+
+ atomic_add_int(&l->lock, RC_INCR);
+ while (l->lock & WAFLAG)
+ nanosleep(&usec, NULL);
+}
+
+static void
+wlock_acquire(void *lock)
+{
+ Lock *l = (Lock *)lock;
+ sigset_t tmp_oldsigmask;
+
+ for ( ; ; ) {
+ sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
+ if (cmp0_and_store_int(&l->lock, WAFLAG) == 0)
+ break;
+ sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
+ nanosleep(&usec, NULL);
+ }
+ oldsigmask = tmp_oldsigmask;
+}
+
+static void
+rlock_release(void *lock)
+{
+ Lock *l = (Lock *)lock;
+
+ atomic_add_int(&l->lock, -RC_INCR);
+}
+
+static void
+wlock_release(void *lock)
+{
+ Lock *l = (Lock *)lock;
+
+ atomic_add_int(&l->lock, -WAFLAG);
+ sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
+}
+
+void
+lockdflt_init(LockInfo *li)
+{
+ li->context = NULL;
+ li->lock_create = lock_create;
+ li->rlock_acquire = rlock_acquire;
+ li->wlock_acquire = wlock_acquire;
+ li->rlock_release = rlock_release;
+ li->wlock_release = wlock_release;
+ li->lock_destroy = lock_destroy;
+ li->context_destroy = NULL;
+ /*
+ * Construct a mask to block all signals except traps which might
+ * conceivably be generated within the dynamic linker itself.
+ */
+ sigfillset(&fullsigmask);
+ sigdelset(&fullsigmask, SIGILL);
+ sigdelset(&fullsigmask, SIGTRAP);
+ sigdelset(&fullsigmask, SIGABRT);
+ sigdelset(&fullsigmask, SIGEMT);
+ sigdelset(&fullsigmask, SIGFPE);
+ sigdelset(&fullsigmask, SIGBUS);
+ sigdelset(&fullsigmask, SIGSEGV);
+ sigdelset(&fullsigmask, SIGSYS);
+}
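
The locking scheme described in the comment at the top of lockdflt.c can be pictured with ordinary C11 atomics. The sketch below is illustrative only and is not part of this commit: sched_yield() and the function names are substitutes for rtld's assembly helpers and nanosleep() spinning, and the real code additionally blocks signals around write locks. The low bit of the lock word marks a writer and the remaining bits count readers, matching WAFLAG and RC_INCR above.

/*
 * Minimal sketch of the reader-preference lock from lockdflt.c,
 * using C11 atomics.  Illustrative only.
 */
#include <sched.h>
#include <stdatomic.h>

#define WAFLAG   0x1    /* A writer holds the lock */
#define RC_INCR  0x2    /* Adjusts count of readers desiring lock */

static atomic_int lockword;     /* low bit: writer; high bits: reader count */

static void
rlock(void)
{
        atomic_fetch_add(&lockword, RC_INCR);   /* announce this reader */
        while (atomic_load(&lockword) & WAFLAG) /* wait out any writer */
                sched_yield();
}

static void
runlock(void)
{
        atomic_fetch_sub(&lockword, RC_INCR);
}

static void
wlock(void)
{
        int expected = 0;

        /* Succeeds only when no reader or writer holds the lock. */
        while (!atomic_compare_exchange_weak(&lockword, &expected, WAFLAG)) {
                expected = 0;
                sched_yield();
        }
}

static void
wunlock(void)
{
        atomic_fetch_sub(&lockword, WAFLAG);
}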
diff --git a/libexec/rtld-elf/ia64/reloc.c b/libexec/rtld-elf/ia64/reloc.c
new file mode 100644
index 0000000..86b7ce6
--- /dev/null
+++ b/libexec/rtld-elf/ia64/reloc.c
@@ -0,0 +1,435 @@
+/*-
+ * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Dynamic linker for ELF.
+ *
+ * John Polstra <jdp@polstra.com>.
+ */
+
+#include <sys/param.h>
+#include <sys/mman.h>
+
+#include <dlfcn.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "debug.h"
+#include "rtld.h"
+
+extern Elf_Dyn _DYNAMIC;
+
+/*
+ * Macros for loading/storing unaligned 64-bit values. These are
+ * needed because relocations can point to unaligned data. This
+ * occurs in the DWARF2 exception frame tables generated by the
+ * compiler, for instance.
+ *
+ * We don't use these when relocating jump slots and GOT entries,
+ * since they are guaranteed to be aligned.
+ *
+ * XXX dfr stub for now.
+ */
+#define load64(p) (*(u_int64_t *) (p))
+#define store64(p, v) (*(u_int64_t *) (p) = (v))
+
+/* Allocate an @fptr. */
+
+#define FPTR_CHUNK_SIZE 64
+
+struct fptr_chunk {
+ struct fptr fptrs[FPTR_CHUNK_SIZE];
+};
+
+static struct fptr_chunk first_chunk;
+static struct fptr_chunk *current_chunk = &first_chunk;
+static struct fptr *next_fptr = &first_chunk.fptrs[0];
+static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];
+
+/*
+ * We use static storage initially so that we don't have to call
+ * malloc during init_rtld().
+ */
+static struct fptr *
+alloc_fptr(Elf_Addr target, Elf_Addr gp)
+{
+ struct fptr* fptr;
+
+ if (next_fptr == last_fptr) {
+ current_chunk = malloc(sizeof(struct fptr_chunk));
+ next_fptr = &current_chunk->fptrs[0];
+ last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
+ }
+ fptr = next_fptr;
+ next_fptr++;
+ fptr->target = target;
+ fptr->gp = gp;
+ return fptr;
+}
+
+/* Relocate a non-PLT object with addend. */
+static int
+reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
+ SymCache *cache, struct fptr **fptrs)
+{
+ Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
+
+ switch (ELF_R_TYPE(rela->r_info)) {
+ case R_IA64_REL64LSB:
+ /*
+ * We handle rtld's relocations in rtld_start.S
+ */
+ if (obj != obj_rtld)
+ store64(where,
+ load64(where) + (Elf_Addr) obj->relocbase);
+ break;
+
+ case R_IA64_DIR64LSB: {
+ const Elf_Sym *def;
+ const Obj_Entry *defobj;
+ Elf_Addr target;
+
+ def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+ false, cache);
+ if (def == NULL)
+ return -1;
+ target = (Elf_Addr) (defobj->relocbase + def->st_value);
+ store64(where, target + rela->r_addend);
+ break;
+ }
+
+ case R_IA64_FPTR64LSB: {
+ /*
+ * We have to make sure that all @fptr references to
+ * the same function are identical so that code can
+ * compare function pointers. We actually only bother
+ * to ensure this within a single object. If the
+ * caller's alloca failed, we don't even ensure that.
+ */
+ const Elf_Sym *def;
+ const Obj_Entry *defobj;
+ struct fptr *fptr = 0;
+ Elf_Addr target, gp;
+
+ /*
+ * Not sure why the call to find_symdef() doesn't work
+ * properly (it fails if the symbol is local). Perhaps
+ * this is a toolchain issue - revisit after we
+ * upgrade the ia64 toolchain.
+ */
+ def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+ false, cache);
+ if (def == NULL) {
+ def = &obj->symtab[ELF_R_SYM(rela->r_info)];
+ defobj = obj;
+ }
+ target = (Elf_Addr) (defobj->relocbase + def->st_value);
+ gp = (Elf_Addr) defobj->pltgot;
+
+ /*
+ * Find the @fptr, using fptrs as a helper.
+ */
+ if (fptrs)
+ fptr = fptrs[ELF_R_SYM(rela->r_info)];
+ if (!fptr) {
+ fptr = alloc_fptr(target, gp);
+ if (fptrs)
+ fptrs[ELF_R_SYM(rela->r_info)] = fptr;
+ }
+ store64(where, (Elf_Addr) fptr);
+ break;
+ }
+
+ default:
+ _rtld_error("%s: Unsupported relocation type %d"
+ " in non-PLT relocations\n", obj->path,
+ ELF_R_TYPE(rela->r_info));
+ return -1;
+ }
+
+ return(0);
+}
+
+/* Process the non-PLT relocations. */
+int
+reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
+{
+ const Elf_Rel *rellim;
+ const Elf_Rel *rel;
+ const Elf_Rela *relalim;
+ const Elf_Rela *rela;
+ SymCache *cache;
+ struct fptr **fptrs;
+
+ cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache));
+ if (cache != NULL)
+ memset(cache, 0, obj->nchains * sizeof(SymCache));
+
+ /*
+ * When relocating rtld itself, we need to avoid using malloc.
+ */
+ if (obj == obj_rtld)
+ fptrs = (struct fptr **)
+ alloca(obj->nchains * sizeof(struct fptr *));
+ else
+ fptrs = (struct fptr **)
+ malloc(obj->nchains * sizeof(struct fptr *));
+
+ if (fptrs == NULL)
+ return -1;
+ memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
+
+ /* Perform relocations without addend if there are any: */
+ rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
+ for (rel = obj->rel; obj->rel != NULL && rel < rellim; rel++) {
+ Elf_Rela locrela;
+
+ locrela.r_info = rel->r_info;
+ locrela.r_offset = rel->r_offset;
+ locrela.r_addend = 0;
+ if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, fptrs))
+ return -1;
+ }
+
+ /* Perform relocations with addend if there are any: */
+ relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
+ for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) {
+ if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, fptrs))
+ return -1;
+ }
+
+ /*
+ * Remember the fptrs in case of later calls to dlsym(). Don't
+ * bother for rtld - we will lazily create a table in
+ * make_function_pointer(). At this point we still can't risk
+ * calling malloc().
+ */
+ if (obj != obj_rtld)
+ obj->priv = fptrs;
+ else
+ obj->priv = NULL;
+
+ return 0;
+}
+
+/* Process the PLT relocations. */
+int
+reloc_plt(Obj_Entry *obj)
+{
+ /* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
+ if (obj->pltrelsize != 0) {
+ const Elf_Rel *rellim;
+ const Elf_Rel *rel;
+
+ rellim = (const Elf_Rel *)
+ ((char *)obj->pltrel + obj->pltrelsize);
+ for (rel = obj->pltrel; rel < rellim; rel++) {
+ Elf_Addr *where;
+
+ assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
+
+ /* Relocate the @fptr pointing into the PLT. */
+ where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
+ *where += (Elf_Addr)obj->relocbase;
+ }
+ } else {
+ const Elf_Rela *relalim;
+ const Elf_Rela *rela;
+
+ relalim = (const Elf_Rela *)
+ ((char *)obj->pltrela + obj->pltrelasize);
+ for (rela = obj->pltrela; rela < relalim; rela++) {
+ Elf_Addr *where;
+
+ assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB);
+
+ /* Relocate the @fptr pointing into the PLT. */
+ where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+ *where += (Elf_Addr)obj->relocbase;
+ }
+ }
+ return 0;
+}
+
+/* Relocate the jump slots in an object. */
+int
+reloc_jmpslots(Obj_Entry *obj)
+{
+ if (obj->jmpslots_done)
+ return 0;
+ /* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
+ if (obj->pltrelsize != 0) {
+ const Elf_Rel *rellim;
+ const Elf_Rel *rel;
+
+ rellim = (const Elf_Rel *)
+ ((char *)obj->pltrel + obj->pltrelsize);
+ for (rel = obj->pltrel; rel < rellim; rel++) {
+ Elf_Addr *where;
+ const Elf_Sym *def;
+ const Obj_Entry *defobj;
+
+ assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
+ where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
+ def = find_symdef(ELF_R_SYM(rel->r_info), obj,
+ &defobj, true, NULL);
+ if (def == NULL)
+ return -1;
+ reloc_jmpslot(where,
+ (Elf_Addr)(defobj->relocbase
+ + def->st_value),
+ defobj);
+ }
+ } else {
+ const Elf_Rela *relalim;
+ const Elf_Rela *rela;
+
+ relalim = (const Elf_Rela *)
+ ((char *)obj->pltrela + obj->pltrelasize);
+ for (rela = obj->pltrela; rela < relalim; rela++) {
+ Elf_Addr *where;
+ const Elf_Sym *def;
+ const Obj_Entry *defobj;
+
+ /* assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB); */
+ where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+ def = find_symdef(ELF_R_SYM(rela->r_info), obj,
+ &defobj, true, NULL);
+ if (def == NULL)
+ return -1;
+ reloc_jmpslot(where,
+ (Elf_Addr)(defobj->relocbase
+ + def->st_value),
+ defobj);
+ }
+ }
+ obj->jmpslots_done = true;
+ return 0;
+}
+
+/* Fixup the jump slot at "where" to transfer control to "target". */
+Elf_Addr
+reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj)
+{
+ Elf_Addr stubaddr;
+
+ dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
+ (void *)where, (void *)target, (void *)obj->pltgot);
+ stubaddr = *where;
+ if (stubaddr != target) {
+
+ /*
+ * Point this @fptr directly at the target. Update the
+ * gp value first so that we don't break another cpu
+ * which is currently executing the PLT entry.
+ */
+ where[1] = (Elf_Addr) obj->pltgot;
+ ia64_mf();
+ where[0] = target;
+ ia64_mf();
+ }
+
+ /*
+ * The caller needs an @fptr for the adjusted entry. The PLT
+ * entry serves this purpose nicely.
+ */
+ return (Elf_Addr) where;
+}
+
+/*
+ * XXX ia64 doesn't seem to have copy relocations.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+int
+do_copy_relocations(Obj_Entry *dstobj)
+{
+
+ return 0;
+}
+
+/*
+ * Return the @fptr representing a given function symbol.
+ */
+void *
+make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
+{
+ struct fptr **fptrs = obj->priv;
+ int index = sym - obj->symtab;
+
+ if (!fptrs) {
+ /*
+ * This should only happen for something like
+ * dlsym("dlopen"). Actually, I'm not sure it can ever
+ * happen.
+ */
+ fptrs = (struct fptr **)
+ malloc(obj->nchains * sizeof(struct fptr *));
+ memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
+ ((Obj_Entry*) obj)->priv = fptrs;
+ }
+ if (!fptrs[index]) {
+ Elf_Addr target, gp;
+ target = (Elf_Addr) (obj->relocbase + sym->st_value);
+ gp = (Elf_Addr) obj->pltgot;
+ fptrs[index] = alloc_fptr(target, gp);
+ }
+ return fptrs[index];
+}
+
+/* Initialize the special PLT entries. */
+void
+init_pltgot(Obj_Entry *obj)
+{
+ const Elf_Dyn *dynp;
+ Elf_Addr *pltres = 0;
+
+ /*
+ * Find the PLT RESERVE section.
+ */
+ for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
+ if (dynp->d_tag == DT_IA64_PLT_RESERVE)
+ pltres = (u_int64_t *)
+ (obj->relocbase + dynp->d_un.d_ptr);
+ }
+ if (!pltres)
+ errx(1, "Can't find DT_IA64_PLT_RESERVE entry");
+
+ /*
+ * The PLT RESERVE section is used to get values to pass to
+ * _rtld_bind when lazy binding.
+ */
+ pltres[0] = (Elf_Addr) obj;
+ pltres[1] = FPTR_TARGET(_rtld_bind_start);
+ pltres[2] = FPTR_GP(_rtld_bind_start);
+}
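
As background for the R_IA64_FPTR64LSB handling and make_function_pointer() above: on ia64 a function pointer is the address of a two-word descriptor pairing the code entry point with the defining module's gp, so rtld must hand out one canonical descriptor per function or function-pointer comparison breaks. The following standalone program is a hypothetical illustration of that point, not part of this commit.

/*
 * Hypothetical illustration of why canonical @fptr descriptors matter;
 * not part of this commit.
 */
#include <stdint.h>
#include <stdio.h>

struct fptr {
        uint64_t target;        /* code entry point */
        uint64_t gp;            /* global pointer of the defining module */
};

static void f(void) { }

int
main(void)
{
        /* Two separately allocated descriptors for the same function... */
        struct fptr a = { (uintptr_t)&f, 0 };
        struct fptr b = { (uintptr_t)&f, 0 };

        /*
         * ...are distinct objects, so handing both out as "&f" would make
         * pointer comparison fail.  This is why reloc_non_plt_obj() caches
         * one descriptor per symbol and make_function_pointer() reuses it.
         */
        printf("distinct descriptors for one function: %p %p\n",
            (void *)&a, (void *)&b);
        return 0;
}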
diff --git a/libexec/rtld-elf/ia64/rtld_machdep.h b/libexec/rtld-elf/ia64/rtld_machdep.h
new file mode 100644
index 0000000..74a3f62
--- /dev/null
+++ b/libexec/rtld-elf/ia64/rtld_machdep.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 1999, 2000 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef RTLD_MACHDEP_H
+#define RTLD_MACHDEP_H 1
+
+/*
+ * Macros for cracking ia64 function pointers.
+ */
+struct fptr {
+ Elf_Addr target;
+ Elf_Addr gp;
+};
+
+#define FPTR_TARGET(f) (((struct fptr *) (f))->target)
+#define FPTR_GP(f) (((struct fptr *) (f))->gp)
+
+/* Return the address of the .dynamic section in the dynamic linker. */
+#define rtld_dynamic(obj) (&_DYNAMIC)
+
+struct Struct_Obj_Entry;
+
+Elf_Addr reloc_jmpslot(Elf_Addr *, Elf_Addr, const struct Struct_Obj_Entry *);
+void *make_function_pointer(const Elf_Sym *, const struct Struct_Obj_Entry *);
+
+/* Atomic operations. */
+int cmp0_and_store_int(volatile int *, int);
+void atomic_add_int(volatile int *, int);
+void atomic_incr_int(volatile int *);
+void atomic_decr_int(volatile int *);
+
+#endif
diff --git a/libexec/rtld-elf/ia64/rtld_start.S b/libexec/rtld-elf/ia64/rtld_start.S
new file mode 100644
index 0000000..6dbb1db
--- /dev/null
+++ b/libexec/rtld-elf/ia64/rtld_start.S
@@ -0,0 +1,306 @@
+/* $FreeBSD$ */
+/* From: NetBSD: rtld_start.S,v 1.1 1996/12/16 20:38:09 cgd Exp */
+
+/*
+ * Copyright 1996 Matt Thomas <matt@3am-software.com>
+ * Copyright 2000 John D. Polstra
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+#include <sys/syscall.h>
+
+ENTRY(_rtld_start, 0)
+ alloc r2=ar.pfs,3,0,3,0
+ ;;
+1: mov r14=ip // calculate gp
+ addl r3=@gprel(1b),r0
+ ;;
+ sub gp=r14,r3
+ ;;
+ .section .sdata
+2: data4 @ltv(1b) // unrelocated address of 1b
+ .align 8
+ .previous
+ add r15=@gprel(2b),gp
+ ;;
+ ld8 r15=[r15]
+ ;;
+ sub out0=r14,r15 // out0 is image base address
+ br.call.sptk.many rp=_rtld_reloc // fixup image
+
+ add sp=-16,sp // 16 bytes for us, 16 for _rtld
+ ;;
+ mov out0=in0
+ add out1=16,sp // address for exit proc
+ add out2=24,sp // address for obj_main
+
+ br.call.sptk.many rp=_rtld // r8=_rtld(sp, &exit_proc, &obj_main)
+
+ add r16=16,sp // address for exit proc
+ ;;
+ ld8 r15=[r16] // read exit proc
+ add sp=16,sp // readjust stack
+ mov b7=r8 // address of real _start
+ ;;
+ alloc r2=ar.pfs,0,0,3,0 // dump register frame
+ mov out2=r15
+
+ br.call.sptk.many rp=b7 // transfer to main program
+ br.call.sptk.many rp=exit // die
+END(_rtld_start)
+
+/*
+ * _rtld_bind_start: lookup a lazy binding and transfer to real target
+ *
+ * Arguments:
+ * r1 gp value for rtld
+ * r15 Index in plt
+ * r16 Obj_Entry of caller
+ * in0-in7 Arguments for target procedure
+ * rp Return address back to caller
+ */
+ENTRY(_rtld_bind_start, 0)
+{ .mii
+ alloc loc0=ar.pfs,8,6,3,0 // space to save r8-r11
+ add r17=16-8*16,sp // leave 16 bytes for _rtld_bind
+ add r18=32-8*16,sp
+ ;;
+} { .mii
+ mov loc2=r8 // structure return address
+ add sp=-8*16,sp // space to save f8-f15
+ mov loc1=rp
+ ;;
+} { .mii
+ stf.spill [r17]=f8,32 // save float arguments
+ mov loc3=r9 // language specific
+ mov loc4=r10 // language specific
+} { .mii
+ stf.spill [r18]=f9,32
+ mov loc5=r11 // language specific
+ shl out1=r15,4 // 16 * index
+ ;;
+} { .mmi
+ stf.spill [r17]=f10,32
+ stf.spill [r18]=f11,32
+ mov out0=r16 // Obj_Entry for caller
+ ;;
+} { .mmi
+ stf.spill [r17]=f12,32
+ stf.spill [r18]=f13,32
+ shladd out1=r15,3,out1 // rela offset = 24 * index
+ ;;
+} { .mmb
+ stf.spill [r17]=f14,32
+ stf.spill [r18]=f15,32
+ br.call.sptk.many rp=_rtld_bind
+} { .mii
+ ld8 r14=[r8],8 // target address
+ add r17=16,sp
+ add r18=32,sp
+ ;;
+} { .mii
+ ld8 r1=[r8] // target gp
+ mov ar.pfs=loc0 // clean up
+ mov rp=loc1
+} { .mmi
+ ldf.fill f8=[r17],32 // restore float arguments
+ ldf.fill f9=[r18],32
+ mov r8=loc2 // restore structure pointer
+ ;;
+} { .mmi
+ ldf.fill f10=[r17],32
+ ldf.fill f11=[r18],32
+ mov r9=loc3
+ ;;
+} { .mmi
+ ldf.fill f12=[r17],32
+ ldf.fill f13=[r18],32
+ mov r10=loc4
+ ;;
+} { .mmi
+ ldf.fill f14=[r17],32
+ ldf.fill f15=[r18],32
+ mov r11=loc5
+ ;;
+} { .mii
+ nop.m 0
+ mov b7=r14
+ add sp=8*16,sp
+ ;;
+} { .mib
+ alloc r14=ar.pfs,0,0,8,0 // drop our register frame
+ nop.i 0
+ br.sptk.many b7 // jump to target
+}
+END(_rtld_bind_start)
+
+/*
+ * int cmp0_and_store_int(volatile int *p, int newval);
+ *
+ * If an int holds 0, store newval into it; else do nothing. Returns
+ * the previous value.
+ */
+ENTRY(cmp0_and_store_int, 2)
+ mov ar.ccv=0
+ ;;
+ cmpxchg4.acq r8=[in0],in1,ar.ccv
+ br.ret.sptk.many rp
+END(cmp0_and_store_int)
+
+ENTRY(atomic_add_int, 2)
+1: ld4 r14=[in0]
+ ;;
+ mov ar.ccv=r14
+ add r15=in1,r14
+ ;;
+ cmpxchg4.acq r16=[in0],r15,ar.ccv
+ ;;
+ cmp.ne p6,p0=r14,r16
+(p6) br.cond.spnt.few 1b
+ br.ret.sptk.many rp
+END(atomic_add_int)
+
+/* Atomically increment an int. */
+ENTRY(atomic_incr_int, 1)
+1: ld4 r14=[in0]
+ ;;
+ mov ar.ccv=r14
+ add r15=1,r14
+ ;;
+ cmpxchg4.acq r16=[in0],r15,ar.ccv
+ ;;
+ cmp.ne p6,p0=r14,r16
+(p6) br.cond.spnt.few 1b
+ br.ret.sptk.many rp
+END(atomic_incr_int)
+
+/* Atomically decrement an int. */
+ENTRY(atomic_decr_int, 1)
+1: ld4 r14=[in0]
+ ;;
+ mov ar.ccv=r14
+ add r15=-1,r14
+ ;;
+ cmpxchg4.acq r16=[in0],r15,ar.ccv
+ ;;
+ cmp.ne p6,p0=r14,r16
+(p6) br.cond.spnt.few 1b
+ br.ret.sptk.many rp
+END(atomic_decr_int)
+
+#define DT_NULL 0 /* Terminating entry. */
+#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */
+#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. */
+#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */
+
+#define R_IA64_NONE 0 /* None */
+#define R_IA64_DIR64LSB 0x27 /* word64 LSB S + A */
+#define R_IA64_REL64LSB 0x6f /* word64 LSB BD + A */
+
+/*
+ * _rtld_reloc: relocate the rtld image, apart from @fptrs.
+ *
+ * Assumes that rtld was linked at zero and that we only need to
+ * handle REL64LSB and DIR64LSB relocations.
+ *
+ * Arguments:
+ * r1 gp value for rtld
+ * in0 rtld base address
+ */
+STATIC_ENTRY(_rtld_reloc, 1)
+ alloc loc0=ar.pfs,1,2,0,0
+ mov loc1=rp
+ ;;
+ movl r15=@gprel(_DYNAMIC) // find _DYNAMIC etc.
+ ;;
+ add r15=r15,gp // relocate _DYNAMIC etc.
+ ;;
+1: ld8 r16=[r15],8 // read r15->d_tag
+ ;;
+ ld8 r17=[r15],8 // and r15->d_val
+ ;;
+ cmp.eq p6,p0=DT_NULL,r16 // done?
+(p6) br.cond.dpnt.few 2f
+ ;;
+ cmp.eq p6,p0=DT_RELA,r16
+ ;;
+(p6) add r18=r17,in0 // found rela section
+ ;;
+ cmp.eq p6,p0=DT_RELASZ,r16
+ ;;
+(p6) mov r19=r17 // found rela size
+ ;;
+ cmp.eq p6,p0=DT_RELAENT,r16
+ ;;
+(p6) mov r22=r17 // found rela entry size
+ ;;
+ br.sptk.few 1b
+
+2:
+ ld8 r15=[r18],8 // read r_offset
+ ;;
+ ld8 r16=[r18],8 // read r_info
+ add r15=r15,in0 // relocate r_offset
+ ;;
+ ld8 r17=[r18],8 // read r_addend
+ sub r19=r19,r22 // update relasz
+
+ extr.u r23=r16,0,32 // ELF64_R_TYPE(r16)
+ ;;
+ cmp.eq p6,p0=R_IA64_NONE,r23
+(p6) br.cond.dpnt.few 3f
+ ;;
+ cmp.eq p6,p0=R_IA64_DIR64LSB,r23
+ ;;
+(p6) br.cond.dptk.few 4f
+ ;;
+ cmp.eq p6,p0=R_IA64_REL64LSB,r23
+ ;;
+(p6) br.cond.dptk.few 4f
+ ;;
+
+3: cmp.ltu p6,p0=0,r19 // more?
+(p6) br.cond.dptk.few 2b // loop
+
+ mov r8=0 // success return value
+ ;;
+ br.cond.sptk.few 9f // done
+
+4:
+ ld8 r16=[r15] // read value
+ ;;
+ add r16=r16,in0 // relocate it
+ ;;
+ st8 [r15]=r16 // and store it back
+ br.cond.sptk.few 3b
+
+9:
+ mov ar.pfs=loc0
+ mov rp=loc1
+ ;;
+ br.ret.sptk.few rp
+
+END(_rtld_reloc)
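
For readers who prefer C to ia64 assembly, the sketch below mirrors what _rtld_reloc does above: scan _DYNAMIC for the Rela table and rebase each DIR64LSB/REL64LSB word by the load address (the addend is read but not applied, matching the stub noted in reloc.c). It is illustrative only, not part of this commit; the real routine must run as position-independent assembly before any relocated data can be touched.

/*
 * C-level sketch of _rtld_reloc.  Illustrative only.
 */
#include <elf.h>
#include <stddef.h>
#include <stdint.h>

#ifndef R_IA64_DIR64LSB
#define R_IA64_DIR64LSB 0x27
#endif
#ifndef R_IA64_REL64LSB
#define R_IA64_REL64LSB 0x6f
#endif

static void
rtld_reloc_sketch(const Elf64_Dyn *dynp, uint64_t base)
{
        Elf64_Rela *rela = NULL;
        uint64_t relasz = 0;

        /* Locate the Rela table and its size in the dynamic section. */
        for (; dynp->d_tag != DT_NULL; dynp++) {
                if (dynp->d_tag == DT_RELA)
                        rela = (Elf64_Rela *)(base + dynp->d_un.d_ptr);
                else if (dynp->d_tag == DT_RELASZ)
                        relasz = dynp->d_un.d_val;
        }

        /* Rebase every DIR64LSB/REL64LSB word by the load address. */
        for (; rela != NULL && relasz >= sizeof(*rela);
            relasz -= sizeof(*rela), rela++) {
                uint32_t type = (uint32_t)ELF64_R_TYPE(rela->r_info);

                if (type == R_IA64_DIR64LSB || type == R_IA64_REL64LSB)
                        *(uint64_t *)(base + rela->r_offset) += base;
        }
}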