From 7102c463391b21fea4e6679d5c6539bb04b7f528 Mon Sep 17 00:00:00 2001 From: kib Date: Fri, 12 Sep 2014 16:22:01 +0000 Subject: MFC r270798: Process STT_GNU_IFUNC when doing non-plt relocations. MFC r270802: Only do the second pass over non-plt relocations when the first pass found IFUNCs. Approved by: re (gjb) --- libexec/rtld-elf/amd64/reloc.c | 351 +++++++++++++++++-------------------- libexec/rtld-elf/arm/reloc.c | 4 + libexec/rtld-elf/i386/reloc.c | 264 +++++++++++++--------------- libexec/rtld-elf/ia64/reloc.c | 4 + libexec/rtld-elf/mips/reloc.c | 4 + libexec/rtld-elf/powerpc/reloc.c | 4 + libexec/rtld-elf/powerpc64/reloc.c | 4 + libexec/rtld-elf/rtld.c | 13 +- libexec/rtld-elf/rtld.h | 3 + libexec/rtld-elf/sparc64/reloc.c | 4 + 10 files changed, 321 insertions(+), 334 deletions(-) (limited to 'libexec') diff --git a/libexec/rtld-elf/amd64/reloc.c b/libexec/rtld-elf/amd64/reloc.c index 7b002b2..35f33cc 100644 --- a/libexec/rtld-elf/amd64/reloc.c +++ b/libexec/rtld-elf/amd64/reloc.c @@ -125,213 +125,188 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, const Elf_Rela *relalim; const Elf_Rela *rela; SymCache *cache; - int r = -1; + const Elf_Sym *def; + const Obj_Entry *defobj; + Elf_Addr *where, symval; + Elf32_Addr *where32; + int r; + r = -1; /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). */ if (obj != obj_rtld) { - cache = calloc(obj->dynsymcount, sizeof(SymCache)); - /* No need to check for NULL here */ + cache = calloc(obj->dynsymcount, sizeof(SymCache)); + /* No need to check for NULL here */ } else - cache = NULL; + cache = NULL; - relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize); + relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize); for (rela = obj->rela; rela < relalim; rela++) { - Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset); - Elf32_Addr *where32 = (Elf32_Addr *)where; - - switch (ELF_R_TYPE(rela->r_info)) { - - case R_X86_64_NONE: - break; - - case R_X86_64_64: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where = (Elf_Addr) (defobj->relocbase + def->st_value + rela->r_addend); - } - break; - - case R_X86_64_PC32: - /* - * I don't think the dynamic linker should ever see this - * type of relocation. But the binutils-2.6 tools sometimes - * generate it. - */ - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where32 = (Elf32_Addr) (unsigned long) (defobj->relocbase + - def->st_value + rela->r_addend - (Elf_Addr) where); - } - break; - /* missing: R_X86_64_GOT32 R_X86_64_PLT32 */ - - case R_X86_64_COPY: /* - * These are deferred until all other relocations have - * been done. All we do here is make sure that the COPY - * relocation is not in a shared library. They are allowed - * only in executable files. + * First, resolve symbol for relocations which + * reference symbols. 
*/ - if (!obj->mainprog) { - _rtld_error("%s: Unexpected R_X86_64_COPY relocation" - " in shared library", obj->path); - goto done; - } - break; - - case R_X86_64_GLOB_DAT: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where = (Elf_Addr) (defobj->relocbase + def->st_value); - } - break; - - case R_X86_64_TPOFF64: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - /* - * We lazily allocate offsets for static TLS as we - * see the first relocation that references the - * TLS block. This allows us to support (small - * amounts of) static TLS in dynamically loaded - * modules. If we run out of space, we generate an - * error. - */ - if (!defobj->tls_done) { - if (!allocate_tls_offset((Obj_Entry*) defobj)) { - _rtld_error("%s: No space available for static " - "Thread Local Storage", obj->path); - goto done; + switch (ELF_R_TYPE(rela->r_info)) { + case R_X86_64_64: + case R_X86_64_PC32: + case R_X86_64_GLOB_DAT: + case R_X86_64_TPOFF64: + case R_X86_64_TPOFF32: + case R_X86_64_DTPMOD64: + case R_X86_64_DTPOFF64: + case R_X86_64_DTPOFF32: + def = find_symdef(ELF_R_SYM(rela->r_info), obj, + &defobj, flags, cache, lockstate); + if (def == NULL) + goto done; + /* + * If symbol is IFUNC, only perform relocation + * when caller allowed it by passing + * SYMLOOK_IFUNC flag. Skip the relocations + * otherwise. + * + * Also error out in case IFUNC relocations + * are specified for TLS, which cannot be + * usefully interpreted. + */ + if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) { + switch (ELF_R_TYPE(rela->r_info)) { + case R_X86_64_64: + case R_X86_64_PC32: + case R_X86_64_GLOB_DAT: + if ((flags & SYMLOOK_IFUNC) == 0) { + obj->non_plt_gnu_ifunc = true; + continue; + } + symval = (Elf_Addr)rtld_resolve_ifunc( + defobj, def); + break; + case R_X86_64_TPOFF64: + case R_X86_64_TPOFF32: + case R_X86_64_DTPMOD64: + case R_X86_64_DTPOFF64: + case R_X86_64_DTPOFF32: + _rtld_error("%s: IFUNC for TLS reloc", + obj->path); + goto done; + } + } else { + if ((flags & SYMLOOK_IFUNC) != 0) + continue; + symval = (Elf_Addr)defobj->relocbase + + def->st_value; } - } - - *where = (Elf_Addr) (def->st_value - defobj->tlsoffset + - rela->r_addend); + break; + default: + if ((flags & SYMLOOK_IFUNC) != 0) + continue; + break; } - break; - - case R_X86_64_TPOFF32: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - /* - * We lazily allocate offsets for static TLS as we - * see the first relocation that references the - * TLS block. This allows us to support (small - * amounts of) static TLS in dynamically loaded - * modules. If we run out of space, we generate an - * error. - */ - if (!defobj->tls_done) { - if (!allocate_tls_offset((Obj_Entry*) defobj)) { - _rtld_error("%s: No space available for static " - "Thread Local Storage", obj->path); - goto done; + where = (Elf_Addr *)(obj->relocbase + rela->r_offset); + where32 = (Elf32_Addr *)where; + + switch (ELF_R_TYPE(rela->r_info)) { + case R_X86_64_NONE: + break; + case R_X86_64_64: + *where = symval + rela->r_addend; + break; + case R_X86_64_PC32: + /* + * I don't think the dynamic linker should + * ever see this type of relocation. But the + * binutils-2.6 tools sometimes generate it. 
+ */ + *where32 = (Elf32_Addr)(unsigned long)(symval + + rela->r_addend - (Elf_Addr)where); + break; + /* missing: R_X86_64_GOT32 R_X86_64_PLT32 */ + case R_X86_64_COPY: + /* + * These are deferred until all other relocations have + * been done. All we do here is make sure that the COPY + * relocation is not in a shared library. They are allowed + * only in executable files. + */ + if (!obj->mainprog) { + _rtld_error("%s: Unexpected R_X86_64_COPY " + "relocation in shared library", obj->path); + goto done; } - } - - *where32 = (Elf32_Addr) (def->st_value - - defobj->tlsoffset + - rela->r_addend); - } - break; - - case R_X86_64_DTPMOD64: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where += (Elf_Addr) defobj->tlsindex; - } - break; - - case R_X86_64_DTPOFF64: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where += (Elf_Addr) (def->st_value + rela->r_addend); - } - break; - - case R_X86_64_DTPOFF32: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) + break; + case R_X86_64_GLOB_DAT: + *where = symval; + break; + case R_X86_64_TPOFF64: + /* + * We lazily allocate offsets for static TLS + * as we see the first relocation that + * references the TLS block. This allows us to + * support (small amounts of) static TLS in + * dynamically loaded modules. If we run out + * of space, we generate an error. + */ + if (!defobj->tls_done) { + if (!allocate_tls_offset((Obj_Entry*) defobj)) { + _rtld_error("%s: No space available " + "for static Thread Local Storage", + obj->path); + goto done; + } + } + *where = (Elf_Addr)(def->st_value - defobj->tlsoffset + + rela->r_addend); + break; + case R_X86_64_TPOFF32: + /* + * We lazily allocate offsets for static TLS + * as we see the first relocation that + * references the TLS block. This allows us to + * support (small amounts of) static TLS in + * dynamically loaded modules. If we run out + * of space, we generate an error. 
+ */ + if (!defobj->tls_done) { + if (!allocate_tls_offset((Obj_Entry*) defobj)) { + _rtld_error("%s: No space available " + "for static Thread Local Storage", + obj->path); + goto done; + } + } + *where32 = (Elf32_Addr)(def->st_value - + defobj->tlsoffset + rela->r_addend); + break; + case R_X86_64_DTPMOD64: + *where += (Elf_Addr)defobj->tlsindex; + break; + case R_X86_64_DTPOFF64: + *where += (Elf_Addr)(def->st_value + rela->r_addend); + break; + case R_X86_64_DTPOFF32: + *where32 += (Elf32_Addr)(def->st_value + + rela->r_addend); + break; + case R_X86_64_RELATIVE: + *where = (Elf_Addr)(obj->relocbase + rela->r_addend); + break; + /* + * missing: + * R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, + * R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8 + */ + default: + _rtld_error("%s: Unsupported relocation type %u" + " in non-PLT relocations\n", obj->path, + (unsigned int)ELF_R_TYPE(rela->r_info)); goto done; - - *where32 += (Elf32_Addr) (def->st_value + rela->r_addend); } - break; - - case R_X86_64_RELATIVE: - *where = (Elf_Addr)(obj->relocbase + rela->r_addend); - break; - - /* missing: R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8 */ - - default: - _rtld_error("%s: Unsupported relocation type %u" - " in non-PLT relocations\n", obj->path, - (unsigned int)ELF_R_TYPE(rela->r_info)); - goto done; - } } r = 0; done: - if (cache != NULL) - free(cache); + free(cache); return (r); } diff --git a/libexec/rtld-elf/arm/reloc.c b/libexec/rtld-elf/arm/reloc.c index 715cb7e..9cbdc0e 100644 --- a/libexec/rtld-elf/arm/reloc.c +++ b/libexec/rtld-elf/arm/reloc.c @@ -324,6 +324,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, /* The relocation for the dynamic loader has already been done. */ if (obj == obj_rtld) return (0); + if ((flags & SYMLOOK_IFUNC) != 0) + /* XXX not implemented */ + return (0); + /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). diff --git a/libexec/rtld-elf/i386/reloc.c b/libexec/rtld-elf/i386/reloc.c index 58073db..c1e0a39 100644 --- a/libexec/rtld-elf/i386/reloc.c +++ b/libexec/rtld-elf/i386/reloc.c @@ -126,168 +126,144 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, const Elf_Rel *rellim; const Elf_Rel *rel; SymCache *cache; - int r = -1; + const Elf_Sym *def; + const Obj_Entry *defobj; + Elf_Addr *where, symval, add; + int r; + r = -1; /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). */ if (obj != obj_rtld) { - cache = calloc(obj->dynsymcount, sizeof(SymCache)); - /* No need to check for NULL here */ + cache = calloc(obj->dynsymcount, sizeof(SymCache)); + /* No need to check for NULL here */ } else - cache = NULL; + cache = NULL; - rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); + rellim = (const Elf_Rel *)((caddr_t) obj->rel + obj->relsize); for (rel = obj->rel; rel < rellim; rel++) { - Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset); - - switch (ELF_R_TYPE(rel->r_info)) { - - case R_386_NONE: - break; - - case R_386_32: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where += (Elf_Addr) (defobj->relocbase + def->st_value); - } - break; - - case R_386_PC32: - /* - * I don't think the dynamic linker should ever see this - * type of relocation. 
But the binutils-2.6 tools sometimes - * generate it. - */ - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where += - (Elf_Addr) (defobj->relocbase + def->st_value) - - (Elf_Addr) where; - } - break; - - case R_386_COPY: - /* - * These are deferred until all other relocations have - * been done. All we do here is make sure that the COPY - * relocation is not in a shared library. They are allowed - * only in executable files. - */ - if (!obj->mainprog) { - _rtld_error("%s: Unexpected R_386_COPY relocation" - " in shared library", obj->path); - goto done; - } - break; - - case R_386_GLOB_DAT: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where = (Elf_Addr) (defobj->relocbase + def->st_value); + switch (ELF_R_TYPE(rel->r_info)) { + case R_386_32: + case R_386_PC32: + case R_386_GLOB_DAT: + case R_386_TLS_TPOFF: + case R_386_TLS_TPOFF32: + case R_386_TLS_DTPMOD32: + case R_386_TLS_DTPOFF32: + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + flags, cache, lockstate); + if (def == NULL) + goto done; + if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) { + switch (ELF_R_TYPE(rel->r_info)) { + case R_386_32: + case R_386_PC32: + case R_386_GLOB_DAT: + if ((flags & SYMLOOK_IFUNC) == 0) { + obj->non_plt_gnu_ifunc = true; + continue; + } + symval = (Elf_Addr)rtld_resolve_ifunc( + defobj, def); + break; + case R_386_TLS_TPOFF: + case R_386_TLS_TPOFF32: + case R_386_TLS_DTPMOD32: + case R_386_TLS_DTPOFF32: + _rtld_error("%s: IFUNC for TLS reloc", + obj->path); + goto done; + } + } else { + if ((flags & SYMLOOK_IFUNC) != 0) + continue; + symval = (Elf_Addr)defobj->relocbase + + def->st_value; + } + break; + default: + if ((flags & SYMLOOK_IFUNC) != 0) + continue; + break; } - break; - - case R_386_RELATIVE: - *where += (Elf_Addr) obj->relocbase; - break; - - case R_386_TLS_TPOFF: - case R_386_TLS_TPOFF32: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - Elf_Addr add; - - def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - + where = (Elf_Addr *)(obj->relocbase + rel->r_offset); + + switch (ELF_R_TYPE(rel->r_info)) { + case R_386_NONE: + break; + case R_386_32: + *where += symval; + break; + case R_386_PC32: /* - * We lazily allocate offsets for static TLS as we - * see the first relocation that references the - * TLS block. This allows us to support (small - * amounts of) static TLS in dynamically loaded - * modules. If we run out of space, we generate an - * error. + * I don't think the dynamic linker should ever + * see this type of relocation. But the + * binutils-2.6 tools sometimes generate it. */ - if (!defobj->tls_done) { - if (!allocate_tls_offset((Obj_Entry*) defobj)) { - _rtld_error("%s: No space available for static " - "Thread Local Storage", obj->path); - goto done; + *where += symval - (Elf_Addr)where; + break; + case R_386_COPY: + /* + * These are deferred until all other + * relocations have been done. All we do here + * is make sure that the COPY relocation is + * not in a shared library. They are allowed + * only in executable files. 
+ */ + if (!obj->mainprog) { + _rtld_error("%s: Unexpected R_386_COPY " + "relocation in shared library", obj->path); + goto done; } - } - add = (Elf_Addr) (def->st_value - defobj->tlsoffset); - if (ELF_R_TYPE(rel->r_info) == R_386_TLS_TPOFF) - *where += add; - else - *where -= add; - } - break; - - case R_386_TLS_DTPMOD32: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) - goto done; - - *where += (Elf_Addr) defobj->tlsindex; - } - break; - - case R_386_TLS_DTPOFF32: - { - const Elf_Sym *def; - const Obj_Entry *defobj; - - def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, - flags, cache, lockstate); - if (def == NULL) + break; + case R_386_GLOB_DAT: + *where = symval; + break; + case R_386_RELATIVE: + *where += (Elf_Addr)obj->relocbase; + break; + case R_386_TLS_TPOFF: + case R_386_TLS_TPOFF32: + /* + * We lazily allocate offsets for static TLS + * as we see the first relocation that + * references the TLS block. This allows us to + * support (small amounts of) static TLS in + * dynamically loaded modules. If we run out + * of space, we generate an error. + */ + if (!defobj->tls_done) { + if (!allocate_tls_offset((Obj_Entry*) defobj)) { + _rtld_error("%s: No space available " + "for static Thread Local Storage", + obj->path); + goto done; + } + } + add = (Elf_Addr)(def->st_value - defobj->tlsoffset); + if (ELF_R_TYPE(rel->r_info) == R_386_TLS_TPOFF) + *where += add; + else + *where -= add; + break; + case R_386_TLS_DTPMOD32: + *where += (Elf_Addr)defobj->tlsindex; + break; + case R_386_TLS_DTPOFF32: + *where += (Elf_Addr) def->st_value; + break; + default: + _rtld_error("%s: Unsupported relocation type %d" + " in non-PLT relocations\n", obj->path, + ELF_R_TYPE(rel->r_info)); goto done; - - *where += (Elf_Addr) def->st_value; } - break; - - default: - _rtld_error("%s: Unsupported relocation type %d" - " in non-PLT relocations\n", obj->path, - ELF_R_TYPE(rel->r_info)); - goto done; - } } r = 0; done: - if (cache != NULL) - free(cache); + free(cache); return (r); } diff --git a/libexec/rtld-elf/ia64/reloc.c b/libexec/rtld-elf/ia64/reloc.c index 1a41cb3..539b7ef 100644 --- a/libexec/rtld-elf/ia64/reloc.c +++ b/libexec/rtld-elf/ia64/reloc.c @@ -351,6 +351,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, int bytes = obj->dynsymcount * sizeof(SymCache); int r = -1; + if ((flags & SYMLOOK_IFUNC) != 0) + /* XXX not implemented */ + return (0); + /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). 
diff --git a/libexec/rtld-elf/mips/reloc.c b/libexec/rtld-elf/mips/reloc.c index 24e56ce..4e750d7 100644 --- a/libexec/rtld-elf/mips/reloc.c +++ b/libexec/rtld-elf/mips/reloc.c @@ -275,6 +275,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, if (obj == obj_rtld) return (0); + if ((flags & SYMLOOK_IFUNC) != 0) + /* XXX not implemented */ + return (0); + #ifdef SUPPORT_OLD_BROKEN_LD broken = 0; sym = obj->symtab; diff --git a/libexec/rtld-elf/powerpc/reloc.c b/libexec/rtld-elf/powerpc/reloc.c index 838cfe6..89e5536 100644 --- a/libexec/rtld-elf/powerpc/reloc.c +++ b/libexec/rtld-elf/powerpc/reloc.c @@ -294,6 +294,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, SymCache *cache; int r = -1; + if ((flags & SYMLOOK_IFUNC) != 0) + /* XXX not implemented */ + return (0); + /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). diff --git a/libexec/rtld-elf/powerpc64/reloc.c b/libexec/rtld-elf/powerpc64/reloc.c index fb5325f..65db28f 100644 --- a/libexec/rtld-elf/powerpc64/reloc.c +++ b/libexec/rtld-elf/powerpc64/reloc.c @@ -290,6 +290,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, int bytes = obj->dynsymcount * sizeof(SymCache); int r = -1; + if ((flags & SYMLOOK_IFUNC) != 0) + /* XXX not implemented */ + return (0); + /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). diff --git a/libexec/rtld-elf/rtld.c b/libexec/rtld-elf/rtld.c index 344a6d9..06eecb7 100644 --- a/libexec/rtld-elf/rtld.c +++ b/libexec/rtld-elf/rtld.c @@ -2475,7 +2475,7 @@ relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, } } - /* Process the non-PLT relocations. */ + /* Process the non-PLT non-IFUNC relocations. */ if (reloc_non_plt(obj, rtldobj, flags, lockstate)) return (-1); @@ -2488,7 +2488,6 @@ relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, } } - /* Set the special PLT or GOT entries. */ init_pltgot(obj); @@ -2500,6 +2499,16 @@ relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj, if (reloc_jmpslots(obj, flags, lockstate) == -1) return (-1); + /* + * Process the non-PLT IFUNC relocations. The relocations are + * processed in two phases, because IFUNC resolvers may + * reference other symbols, which must be readily processed + * before resolvers are called. + */ + if (obj->non_plt_gnu_ifunc && + reloc_non_plt(obj, rtldobj, flags | SYMLOOK_IFUNC, lockstate)) + return (-1); + if (obj->relro_size > 0) { if (mprotect(obj->relro_page, obj->relro_size, PROT_READ) == -1) { diff --git a/libexec/rtld-elf/rtld.h b/libexec/rtld-elf/rtld.h index d186dcc..3356dda 100644 --- a/libexec/rtld-elf/rtld.h +++ b/libexec/rtld-elf/rtld.h @@ -268,6 +268,7 @@ typedef struct Struct_Obj_Entry { bool filtees_loaded : 1; /* Filtees loaded */ bool irelative : 1; /* Object has R_MACHDEP_IRELATIVE relocs */ bool gnu_ifunc : 1; /* Object has references to STT_GNU_IFUNC */ + bool non_plt_gnu_ifunc : 1; /* Object has non-plt IFUNC references */ bool crt_no_init : 1; /* Object' crt does not call _init/_fini */ bool valid_hash_sysv : 1; /* A valid System V hash hash tag is available */ bool valid_hash_gnu : 1; /* A valid GNU hash tag is available */ @@ -290,6 +291,8 @@ typedef struct Struct_Obj_Entry { #define SYMLOOK_DLSYM 0x02 /* Return newest versioned symbol. Used by dlsym. */ #define SYMLOOK_EARLY 0x04 /* Symlook is done during initialization. 
*/ +#define SYMLOOK_IFUNC 0x08 /* Allow IFUNC processing in + reloc_non_plt(). */ /* Flags for load_object(). */ #define RTLD_LO_NOLOAD 0x01 /* dlopen() specified RTLD_NOLOAD. */ diff --git a/libexec/rtld-elf/sparc64/reloc.c b/libexec/rtld-elf/sparc64/reloc.c index 21fae5c..738a847 100644 --- a/libexec/rtld-elf/sparc64/reloc.c +++ b/libexec/rtld-elf/sparc64/reloc.c @@ -300,6 +300,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags, SymCache *cache; int r = -1; + if ((flags & SYMLOOK_IFUNC) != 0) + /* XXX not implemented */ + return (0); + /* * The dynamic loader may be called from a thread, we have * limited amounts of stack available so we cannot use alloca(). -- cgit v1.1
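
A note on what the new SYMLOOK_IFUNC pass handles: the value of an STT_GNU_IFUNC symbol is a resolver function that rtld must call to obtain the real implementation. The sketch below is illustrative only and is not part of this change; the names (ifunc_demo.c, add, add_generic, add_avx2, resolve_add, libdemo.so) and the build line are invented, and it assumes a compiler that provides __attribute__((ifunc)), __builtin_cpu_init() and __builtin_cpu_supports() (GCC or a recent Clang, x86 targets).

/*
 * ifunc_demo.c -- illustrative sketch only; build as a shared object,
 * e.g.:  cc -fPIC -shared -o libdemo.so ifunc_demo.c
 */

static int
add_generic(int a, int b)
{
	return (a + b);
}

static int
add_avx2(int a, int b)
{
	/* Stand-in for a hand-tuned variant. */
	return (a + b);
}

/*
 * The resolver runs inside rtld while objects are still being
 * relocated, so everything it references must already be usable.
 * This is why relocate_object() above defers non-PLT IFUNC
 * relocations to a second reloc_non_plt() pass, taken only when the
 * first pass saw an IFUNC and set obj->non_plt_gnu_ifunc.
 */
static int (*resolve_add(void))(int, int)
{
	/* Required before CPU feature checks in an IFUNC resolver. */
	__builtin_cpu_init();
	return (__builtin_cpu_supports("avx2") ? add_avx2 : add_generic);
}

int add(int a, int b) __attribute__((ifunc("resolve_add")));

A reference to add through a GOT entry or a data pointer in another object shows up as an R_X86_64_GLOB_DAT or R_X86_64_64 relocation against the IFUNC symbol; those are the cases the amd64 and i386 reloc_non_plt() above now resolve via rtld_resolve_ifunc() during the SYMLOOK_IFUNC pass, while TLS relocations against IFUNC symbols are rejected with "IFUNC for TLS reloc".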