author     nwhitehorn <nwhitehorn@FreeBSD.org>  2016-01-01 00:11:29 +0000
committer  nwhitehorn <nwhitehorn@FreeBSD.org>  2016-01-01 00:11:29 +0000
commit     ed55793422d36f8a8036159c0b7c8011bc2fe2fb (patch)
tree       9068fb5ae26b2afe55087154e81377cd36ee9678 /libexec/rtld-elf
parent     98dfb3c76272919eb79ecb125e6bbb47299fdb4f (diff)
Unify the ELFv1 and ELFv2 code paths and make ELFv1 (the normal ABI) more
correct in the process.

MFC after:	2 weeks
Diffstat (limited to 'libexec/rtld-elf')
-rw-r--r--  libexec/rtld-elf/powerpc64/reloc.c       | 57
-rw-r--r--  libexec/rtld-elf/powerpc64/rtld_start.S  | 15
2 files changed, 33 insertions(+), 39 deletions(-)
diff --git a/libexec/rtld-elf/powerpc64/reloc.c b/libexec/rtld-elf/powerpc64/reloc.c
index 0c75a2a..c428d6b 100644
--- a/libexec/rtld-elf/powerpc64/reloc.c
+++ b/libexec/rtld-elf/powerpc64/reloc.c
@@ -338,26 +338,19 @@ static int
reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
{
Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
-#if !defined(_CALL_ELF) || _CALL_ELF == 1
- Elf_Addr *glink;
-#endif
long reloff;
reloff = rela - obj->pltrela;
-#if !defined(_CALL_ELF) || _CALL_ELF == 1
- if (obj->priv == NULL)
- obj->priv = xmalloc(obj->pltrelasize);
- glink = obj->priv + reloff*sizeof(Elf_Addr)*2;
-
- dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%p", (void *)where, reloff, glink);
+ dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where,
+ reloff, obj->glink);
- memcpy(where, _rtld_bind_start, sizeof(struct funcdesc));
- ((struct funcdesc *)(where))->env = (Elf_Addr)glink;
- *(glink++) = (Elf_Addr)obj;
- *(glink++) = reloff*sizeof(Elf_Rela);
+#if !defined(_CALL_ELF) || _CALL_ELF == 1
+ /* Glink code is 3 instructions after the first 32k, 2 before */
+ *where = (Elf_Addr)obj->glink + 32 +
+ 8*((reloff < 0x8000) ? reloff : 0x8000) +
+ 12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
#else
- dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where, reloff, obj->glink);
*where = (Elf_Addr)obj->glink + 4*reloff + 32;
#endif
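
[Note: the following is a minimal stand-alone sketch, not part of the commit, of the
ELFv1 glink stub address arithmetic used above: stubs start 32 bytes past the glink
header, the first 0x8000 entries are 2 instructions (8 bytes) each, and every later
entry takes 3 instructions (12 bytes). The helper name is hypothetical.]

    #include <stdint.h>

    /* Hypothetical helper mirroring the *where computation above. */
    static uintptr_t
    glink_stub_addr(uintptr_t glink, long reloff)
    {
        uintptr_t addr = glink + 32;        /* skip the glink header */

        if (reloff < 0x8000)
            return (addr + 8 * reloff);     /* 2-instruction stubs */
        return (addr + 8 * 0x8000 +         /* all the short stubs, then */
            12 * (reloff - 0x8000));        /* 3-instruction stubs */
    }
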
@@ -416,13 +409,6 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
target = (Elf_Addr)(defobj->relocbase + def->st_value);
-#if 0
- /* PG XXX */
- dbg("\"%s\" in \"%s\" --> %p in \"%s\"",
- defobj->strtab + def->st_name, basename(obj->path),
- (void *)target, basename(defobj->path));
-#endif
-
if (def == &sym_zero) {
/* Zero undefined weak symbols */
#if !defined(_CALL_ELF) || _CALL_ELF == 1
@@ -461,12 +447,28 @@ reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
(void *)wherep, (void *)target, *(Elf_Addr *)target,
(Elf_Addr)defobj->relocbase);
+ /*
+ * For the trampoline, the second two elements of the function
+ * descriptor are unused, so we are fine replacing those at any time
+ * with the real ones with no thread safety implications. However, we
+ * need to make sure the main entry point pointer ([0]) is seen to be
+ * modified *after* the second two elements. This can't be done in
+ * general, since there are no barriers in the reading code, but put in
+ * some isyncs to at least make it a little better.
+ */
memcpy(wherep, (void *)target, sizeof(struct funcdesc));
+ wherep[2] = ((Elf_Addr *)target)[2];
+ wherep[1] = ((Elf_Addr *)target)[1];
+ __asm __volatile ("isync" : : : "memory");
+ wherep[0] = ((Elf_Addr *)target)[0];
+ __asm __volatile ("isync" : : : "memory");
+
if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
/*
- * XXX: It is possible (e.g. LD_BIND_NOW) that the function
+ * It is possible (LD_BIND_NOW) that the function
* descriptor we are copying has not yet been relocated.
- * If this happens, fix it.
+ * If this happens, fix it. Don't worry about threading in
+ * this case since LD_BIND_NOW makes it irrelevant.
*/
((struct funcdesc *)(wherep))->addr +=
@@ -481,8 +483,6 @@ reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
*wherep = target;
#endif
- __asm __volatile("sync" ::: "memory");
-
return (target);
}
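
[Note: a rough sketch, not taken from the commit, of the publication order the new
code enforces for an ELFv1 function descriptor: the environment and TOC words are
stored first, the entry point last, with isync as a best-effort barrier because the
reading side takes no lock. The helper name is hypothetical, uint64_t stands in for
Elf_Addr, and the isync makes this PowerPC-only.]

    #include <stdint.h>

    static void
    publish_funcdesc(uint64_t *slot, const uint64_t *real)
    {
        slot[2] = real[2];      /* environment word: unused while readers race */
        slot[1] = real[1];      /* TOC pointer */
        __asm __volatile("isync" : : : "memory");
        slot[0] = real[0];      /* entry point: must be seen to change last */
        __asm __volatile("isync" : : : "memory");
    }
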
@@ -506,7 +506,6 @@ reloc_gnu_ifunc(Obj_Entry *obj, int flags,
void
init_pltgot(Obj_Entry *obj)
{
-#if defined(_CALL_ELF) && _CALL_ELF == 2
Elf_Addr *pltcall;
pltcall = obj->pltgot;
@@ -515,10 +514,12 @@ init_pltgot(Obj_Entry *obj)
return;
}
+#if defined(_CALL_ELF) && _CALL_ELF == 2
pltcall[0] = (Elf_Addr)&_rtld_bind_start;
pltcall[1] = (Elf_Addr)obj;
-
- __asm __volatile("sync" ::: "memory");
+#else
+ memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
+ pltcall[2] = (Elf_Addr)obj;
#endif
}
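
[Note: as a reading aid only, an interpretation of the PLTGOT header that
init_pltgot() leaves behind in each ABI; these struct names are hypothetical and
uint64_t again stands in for Elf_Addr.]

    #include <stdint.h>

    struct pltgot_elfv2 {       /* ELFv2: raw code address plus object pointer */
        uint64_t bind_start;    /* address of _rtld_bind_start */
        uint64_t obj;           /* Obj_Entry pointer for this object */
    };

    struct pltgot_elfv1 {       /* ELFv1: copied function descriptor */
        uint64_t entry;         /* _rtld_bind_start entry point */
        uint64_t toc;           /* _rtld_bind_start TOC pointer */
        uint64_t obj;           /* env word overwritten with the Obj_Entry pointer */
    };
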
diff --git a/libexec/rtld-elf/powerpc64/rtld_start.S b/libexec/rtld-elf/powerpc64/rtld_start.S
index ee04d97..c274b6f 100644
--- a/libexec/rtld-elf/powerpc64/rtld_start.S
+++ b/libexec/rtld-elf/powerpc64/rtld_start.S
@@ -111,10 +111,7 @@ _ENTRY(_rtld_start)
*
* Call into the MI binder. This routine is reached via the PLT call cell
*
- * For ELFv1, on entry, %r11 contains a pointer to the (object, relocation)
- * tuple.
- *
- * For ELFv2, %r11 contains an object pointer and %r0 contains the PLT index.
+ * On entry, %r11 contains an object pointer and %r0 contains the PLT index.
*
* Save all registers, call into the binder to resolve and fixup the external
* routine, and then transfer to the external routine on return.
@@ -122,7 +119,7 @@ _ENTRY(_rtld_start)
.globl _rtld_bind
_ENTRY(_rtld_bind_start)
- mr %r12,%r0 # shunt r0 immediately to r12 for ELFv2
+ mr %r12,%r0 # save r0 (index) immediately to r12
mflr %r0
std %r0,16(%r1) # save lr
mfcr %r0
@@ -139,13 +136,9 @@ _ENTRY(_rtld_bind_start)
std %r9,64+6*8(%r1)
std %r10,64+7*8(%r1)
-#if !defined(_CALL_ELF) || _CALL_ELF == 1
- ld %r3,0(%r11)
- ld %r4,8(%r11)
-#else
mr %r3,%r11
- mulli %r4,%r12,24 /* Multiply index by sizeof(Elf_Rela) */
-#endif
+ mulli %r4,%r12,24 # Multiply index by sizeof(Elf_Rela)
+
bl _rtld_bind # target addr = _rtld_bind(obj, reloff)
nop
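
[Note: the second argument handed to _rtld_bind() is the PLT index scaled to a byte
offset into the object's .rela.plt. A small sketch, hypothetical and using a stand-in
Elf_Rela layout, of the same arithmetic the mulli above performs: three 8-byte fields
give sizeof(Elf_Rela) == 24.]

    #include <stdint.h>

    struct elf64_rela {         /* stand-in for Elf_Rela: 3 x 8 bytes = 24 */
        uint64_t r_offset;
        uint64_t r_info;
        int64_t  r_addend;
    };

    static uint64_t
    plt_index_to_reloff(uint64_t index)
    {
        /* mulli %r4,%r12,24 in the stub above */
        return (index * sizeof(struct elf64_rela));
    }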