Diffstat (limited to 'contrib/binutils/bfd/elf64-ppc.c')
-rw-r--r--  contrib/binutils/bfd/elf64-ppc.c  2496
1 file changed, 1968 insertions, 528 deletions
diff --git a/contrib/binutils/bfd/elf64-ppc.c b/contrib/binutils/bfd/elf64-ppc.c
index d25c25f..e431c14 100644
--- a/contrib/binutils/bfd/elf64-ppc.c
+++ b/contrib/binutils/bfd/elf64-ppc.c
@@ -55,64 +55,10 @@ static bfd_reloc_status_type ppc64_elf_toc64_reloc
PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
static bfd_reloc_status_type ppc64_elf_unhandled_reloc
PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
-static boolean ppc64_elf_set_private_flags
- PARAMS ((bfd *, flagword));
+static boolean ppc64_elf_object_p
+ PARAMS ((bfd *));
static boolean ppc64_elf_merge_private_bfd_data
PARAMS ((bfd *, bfd *));
-static boolean ppc64_elf_section_from_shdr
- PARAMS ((bfd *, Elf64_Internal_Shdr *, char *));
-static struct bfd_hash_entry *link_hash_newfunc
- PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
-static struct bfd_link_hash_table *ppc64_elf_link_hash_table_create
- PARAMS ((bfd *));
-static boolean create_linkage_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-static boolean create_got_section
- PARAMS ((bfd *, struct bfd_link_info *));
-static boolean ppc64_elf_create_dynamic_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-static void ppc64_elf_copy_indirect_symbol
- PARAMS ((struct elf_link_hash_entry *, struct elf_link_hash_entry *));
-static boolean ppc64_elf_check_relocs
- PARAMS ((bfd *, struct bfd_link_info *, asection *,
- const Elf_Internal_Rela *));
-static asection * ppc64_elf_gc_mark_hook
- PARAMS ((bfd *abfd, struct bfd_link_info *info, Elf_Internal_Rela *rel,
- struct elf_link_hash_entry *h, Elf_Internal_Sym *sym));
-static boolean ppc64_elf_gc_sweep_hook
- PARAMS ((bfd *abfd, struct bfd_link_info *info, asection *sec,
- const Elf_Internal_Rela *relocs));
-static boolean func_desc_adjust
- PARAMS ((struct elf_link_hash_entry *, PTR));
-static boolean ppc64_elf_func_desc_adjust
- PARAMS ((bfd *, struct bfd_link_info *));
-static boolean ppc64_elf_adjust_dynamic_symbol
- PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *));
-static void ppc64_elf_hide_symbol
- PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *, boolean));
-static boolean allocate_dynrelocs
- PARAMS ((struct elf_link_hash_entry *, PTR));
-static boolean readonly_dynrelocs
- PARAMS ((struct elf_link_hash_entry *, PTR));
-static enum elf_reloc_type_class ppc64_elf_reloc_type_class
- PARAMS ((const Elf_Internal_Rela *));
-static boolean ppc64_elf_size_dynamic_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-static bfd_byte *build_plt_stub
- PARAMS ((bfd *, bfd_byte *, int, int));
-static boolean build_one_stub
- PARAMS ((struct elf_link_hash_entry *, PTR));
-static boolean ppc64_elf_fake_sections
- PARAMS ((bfd *, Elf64_Internal_Shdr *, asection *));
-static boolean ppc64_elf_relocate_section
- PARAMS ((bfd *, struct bfd_link_info *info, bfd *, asection *, bfd_byte *,
- Elf_Internal_Rela *relocs, Elf_Internal_Sym *local_syms,
- asection **));
-static boolean ppc64_elf_finish_dynamic_symbol
- PARAMS ((bfd *, struct bfd_link_info *, struct elf_link_hash_entry *,
- Elf_Internal_Sym *));
-static boolean ppc64_elf_finish_dynamic_sections
- PARAMS ((bfd *, struct bfd_link_info *));
/* The name of the dynamic interpreter. This is put in the .interp
@@ -158,7 +104,7 @@ static boolean ppc64_elf_finish_dynamic_sections
#define CROR_151515 0x4def7b82
#define CROR_313131 0x4ffffb82
-/* .glink entries for the first 32k functions are two instructions. */
+/* .glink entries for the first 32k functions are two instructions. */
#define LI_R0_0 0x38000000 /* li %r0,0 */
#define B_DOT 0x48000000 /* b . */
@@ -175,12 +121,13 @@ static boolean ppc64_elf_finish_dynamic_sections
/* Since .opd is an array of descriptors and each entry will end up
with identical R_PPC64_RELATIVE relocs, there is really no need to
propagate .opd relocs; The dynamic linker should be taught to
- relocate .opd without reloc entries. FIXME: .opd should be trimmed
- of unused values. */
+ relocate .opd without reloc entries. */
#ifndef NO_OPD_RELOCS
#define NO_OPD_RELOCS 0
#endif
+#define ONES(n) (((bfd_vma) 1 << ((n) - 1) << 1) - 1)
+
/* Relocation HOWTO's. */
static reloc_howto_type *ppc64_elf_howto_table[(int) R_PPC_max];
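[Editor's note: illustrative sketch, not part of the patch.  ONES (n) builds an
n-bit all-ones mask for the dst_mask fields below.  The shift is split into
(n - 1) and 1 because shifting a 64-bit value by 64 is undefined behaviour in
C, so ONES (64) could not be written as ((bfd_vma) 1 << 64) - 1.  A standalone
version, with uint64_t standing in for bfd_vma:]

#include <stdint.h>
#include <stdio.h>

#define ONES(n) (((uint64_t) 1 << ((n) - 1) << 1) - 1)

int
main (void)
{
  /* 14-bit mask: 0x3fff.  */
  printf ("ONES(14) = %#llx\n", (unsigned long long) ONES (14));
  /* Full 64-bit mask, with no single shift count reaching 64.  */
  printf ("ONES(64) = %#llx\n", (unsigned long long) ONES (64));
  return 0;
}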
@@ -188,11 +135,11 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
/* This reloc does nothing. */
HOWTO (R_PPC64_NONE, /* type */
0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
false, /* pc_relative */
0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
+ complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
"R_PPC64_NONE", /* name */
false, /* partial_inplace */
@@ -228,7 +175,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_ADDR24", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0x3fffffc, /* dst_mask */
+ 0x03fffffc, /* dst_mask */
false), /* pcrel_offset */
/* A standard 16 bit relocation. */
@@ -305,7 +252,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_ADDR14", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xfffc, /* dst_mask */
+ 0x0000fffc, /* dst_mask */
false), /* pcrel_offset */
/* An absolute 16 bit branch, for which bit 10 should be set to
@@ -322,7 +269,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_ADDR14_BRTAKEN",/* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xfffc, /* dst_mask */
+ 0x0000fffc, /* dst_mask */
false), /* pcrel_offset */
/* An absolute 16 bit branch, for which bit 10 should be set to
@@ -339,7 +286,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_ADDR14_BRNTAKEN",/* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xfffc, /* dst_mask */
+ 0x0000fffc, /* dst_mask */
false), /* pcrel_offset */
/* A relative 26 bit branch; the lower two bits must be zero. */
@@ -354,7 +301,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_REL24", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0x3fffffc, /* dst_mask */
+ 0x03fffffc, /* dst_mask */
true), /* pcrel_offset */
/* A relative 16 bit branch; the lower two bits must be zero. */
@@ -369,7 +316,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_REL14", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xfffc, /* dst_mask */
+ 0x0000fffc, /* dst_mask */
true), /* pcrel_offset */
/* A relative 16 bit branch. Bit 10 should be set to indicate that
@@ -386,7 +333,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_REL14_BRTAKEN", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xfffc, /* dst_mask */
+ 0x0000fffc, /* dst_mask */
true), /* pcrel_offset */
/* A relative 16 bit branch. Bit 10 should be set to indicate that
@@ -403,7 +350,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_REL14_BRNTAKEN",/* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xfffc, /* dst_mask */
+ 0x0000fffc, /* dst_mask */
true), /* pcrel_offset */
/* Like R_PPC64_ADDR16, but referring to the GOT table entry for the
@@ -477,12 +424,12 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
run has to have the data at some particular address. */
HOWTO (R_PPC64_COPY, /* type */
0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
+ 0, /* this one is variable size */
+ 0, /* bitsize */
false, /* pc_relative */
0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
- ppc64_elf_unhandled_reloc, /* special_function */
+ complain_overflow_dont, /* complain_on_overflow */
+ ppc64_elf_unhandled_reloc, /* special_function */
"R_PPC64_COPY", /* name */
false, /* partial_inplace */
0, /* src_mask */
@@ -502,7 +449,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_GLOB_DAT", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xffffffffffffffff, /* dst_mask */
+ ONES (64), /* dst_mask */
false), /* pcrel_offset */
/* Created by the link editor. Marks a procedure linkage table
@@ -535,7 +482,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_RELATIVE", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xffffffffffffffff, /* dst_mask */
+ ONES (64), /* dst_mask */
false), /* pcrel_offset */
/* Like R_PPC64_ADDR32, but may be unaligned. */
@@ -575,7 +522,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
32, /* bitsize */
true, /* pc_relative */
0, /* bitpos */
- /* FIXME: Verify. Was complain_overflow_bitfield. */
+ /* FIXME: Verify. Was complain_overflow_bitfield. */
complain_overflow_signed, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
"R_PPC64_REL32", /* name */
@@ -596,7 +543,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_PLT32", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0, /* dst_mask */
+ 0xffffffff, /* dst_mask */
false), /* pcrel_offset */
/* 32-bit PC relative relocation to the symbol's procedure linkage table.
@@ -612,7 +559,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_PLTREL32", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0, /* dst_mask */
+ 0xffffffff, /* dst_mask */
true), /* pcrel_offset */
/* Like R_PPC64_ADDR16_LO, but referring to the PLT table entry for
@@ -753,7 +700,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_ADDR64", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xffffffffffffffff, /* dst_mask */
+ ONES (64), /* dst_mask */
false), /* pcrel_offset */
/* The bits 32-47 of an address. */
@@ -830,7 +777,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_UADDR64", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xffffffffffffffff, /* dst_mask */
+ ONES (64), /* dst_mask */
false), /* pcrel_offset */
/* 64-bit relative relocation. */
@@ -845,10 +792,10 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_REL64", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xffffffffffffffff, /* dst_mask */
+ ONES (64), /* dst_mask */
true), /* pcrel_offset */
- /* 64-bit relocation to the symbol's procedure linkage table. */
+ /* 64-bit relocation to the symbol's procedure linkage table. */
HOWTO (R_PPC64_PLT64, /* type */
0, /* rightshift */
4, /* size (0=byte, 1=short, 2=long, 4=64 bits) */
@@ -860,7 +807,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_PLT64", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0, /* dst_mask */
+ ONES (64), /* dst_mask */
false), /* pcrel_offset */
/* 64-bit PC relative relocation to the symbol's procedure linkage
@@ -877,7 +824,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_PLTREL64", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0, /* dst_mask */
+ ONES (64), /* dst_mask */
true), /* pcrel_offset */
/* 16 bit TOC-relative relocation. */
@@ -964,7 +911,7 @@ static reloc_howto_type ppc64_elf_howto_raw[] = {
"R_PPC64_TOC", /* name */
false, /* partial_inplace */
0, /* src_mask */
- 0xffffffffffffffff, /* dst_mask */
+ ONES (64), /* dst_mask */
false), /* pcrel_offset */
/* Like R_PPC64_GOT16, but also informs the link editor that the
@@ -1411,8 +1358,8 @@ ppc64_elf_info_to_howto (abfd, cache_ptr, dst)
{
unsigned int type;
+ /* Initialize howto table if needed. */
if (!ppc64_elf_howto_table[R_PPC64_ADDR32])
- /* Initialize howto table if needed. */
ppc_howto_init ();
type = ELF64_R_TYPE (dst->r_info);
@@ -1438,7 +1385,7 @@ ppc64_elf_ha_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
/* Adjust the addend for sign extension of the low 16 bits.
@@ -1469,7 +1416,7 @@ ppc64_elf_brtaken_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
octets = reloc_entry->address * bfd_octets_per_byte (abfd);
@@ -1478,7 +1425,7 @@ ppc64_elf_brtaken_reloc (abfd, reloc_entry, symbol, data,
r_type = (enum elf_ppc_reloc_type) reloc_entry->howto->type;
if (r_type == R_PPC64_ADDR14_BRTAKEN
|| r_type == R_PPC64_REL14_BRTAKEN)
- insn |= 0x01 << 21; /* 'y' or 't' bit, lowest bit of BO field. */
+ insn |= 0x01 << 21; /* 'y' or 't' bit, lowest bit of BO field. */
if (is_power4)
{
@@ -1530,7 +1477,7 @@ ppc64_elf_sectoff_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
/* Subtract the symbol section base address. */
@@ -1553,7 +1500,7 @@ ppc64_elf_sectoff_ha_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
/* Subtract the symbol section base address. */
@@ -1581,7 +1528,7 @@ ppc64_elf_toc_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
TOCstart = _bfd_get_gp_value (input_section->output_section->owner);
@@ -1610,7 +1557,7 @@ ppc64_elf_toc_ha_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
TOCstart = _bfd_get_gp_value (input_section->output_section->owner);
@@ -1643,7 +1590,7 @@ ppc64_elf_toc64_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
TOCstart = _bfd_get_gp_value (input_section->output_section->owner);
@@ -1670,7 +1617,7 @@ ppc64_elf_unhandled_reloc (abfd, reloc_entry, symbol, data,
call the generic function. Any adjustment will be done at final
link time. */
if (output_bfd != NULL)
- return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
+ return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
input_section, output_bfd, error_message);
if (error_message != NULL)
@@ -1683,32 +1630,35 @@ ppc64_elf_unhandled_reloc (abfd, reloc_entry, symbol, data,
return bfd_reloc_dangerous;
}
-/* Function to set whether a module needs the -mrelocatable bit set. */
+/* Fix bad default arch selected for a 64 bit input bfd when the
+ default is 32 bit. */
static boolean
-ppc64_elf_set_private_flags (abfd, flags)
+ppc64_elf_object_p (abfd)
bfd *abfd;
- flagword flags;
{
- BFD_ASSERT (!elf_flags_init (abfd)
- || elf_elfheader (abfd)->e_flags == flags);
+ if (abfd->arch_info->the_default && abfd->arch_info->bits_per_word == 32)
+ {
+ Elf_Internal_Ehdr *i_ehdr = elf_elfheader (abfd);
- elf_elfheader (abfd)->e_flags = flags;
- elf_flags_init (abfd) = true;
+ if (i_ehdr->e_ident[EI_CLASS] == ELFCLASS64)
+ {
+ /* Relies on arch after 32 bit default being 64 bit default. */
+ abfd->arch_info = abfd->arch_info->next;
+ BFD_ASSERT (abfd->arch_info->bits_per_word == 64);
+ }
+ }
return true;
}
/* Merge backend specific data from an object file to the output
object file when linking. */
+
static boolean
ppc64_elf_merge_private_bfd_data (ibfd, obfd)
bfd *ibfd;
bfd *obfd;
{
- flagword old_flags;
- flagword new_flags;
- boolean error;
-
/* Check if we have the same endianess. */
if (ibfd->xvec->byteorder != obfd->xvec->byteorder
&& obfd->xvec->byteorder != BFD_ENDIAN_UNKNOWN)
@@ -1726,106 +1676,6 @@ ppc64_elf_merge_private_bfd_data (ibfd, obfd)
return false;
}
- if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour
- || bfd_get_flavour (obfd) != bfd_target_elf_flavour)
- return true;
-
- new_flags = elf_elfheader (ibfd)->e_flags;
- old_flags = elf_elfheader (obfd)->e_flags;
- if (!elf_flags_init (obfd))
- {
- /* First call, no flags set. */
- elf_flags_init (obfd) = true;
- elf_elfheader (obfd)->e_flags = new_flags;
- }
-
- else if (new_flags == old_flags)
- /* Compatible flags are ok. */
- ;
-
- else
- {
- /* Incompatible flags. Warn about -mrelocatable mismatch.
- Allow -mrelocatable-lib to be linked with either. */
- error = false;
- if ((new_flags & EF_PPC_RELOCATABLE) != 0
- && (old_flags & (EF_PPC_RELOCATABLE | EF_PPC_RELOCATABLE_LIB)) == 0)
- {
- error = true;
- (*_bfd_error_handler)
- (_("%s: compiled with -mrelocatable and linked with modules compiled normally"),
- bfd_archive_filename (ibfd));
- }
- else if ((new_flags & (EF_PPC_RELOCATABLE | EF_PPC_RELOCATABLE_LIB)) == 0
- && (old_flags & EF_PPC_RELOCATABLE) != 0)
- {
- error = true;
- (*_bfd_error_handler)
- (_("%s: compiled normally and linked with modules compiled with -mrelocatable"),
- bfd_archive_filename (ibfd));
- }
-
- /* The output is -mrelocatable-lib iff both the input files are. */
- if (! (new_flags & EF_PPC_RELOCATABLE_LIB))
- elf_elfheader (obfd)->e_flags &= ~EF_PPC_RELOCATABLE_LIB;
-
- /* The output is -mrelocatable iff it can't be -mrelocatable-lib,
- but each input file is either -mrelocatable or -mrelocatable-lib. */
- if (! (elf_elfheader (obfd)->e_flags & EF_PPC_RELOCATABLE_LIB)
- && (new_flags & (EF_PPC_RELOCATABLE_LIB | EF_PPC_RELOCATABLE))
- && (old_flags & (EF_PPC_RELOCATABLE_LIB | EF_PPC_RELOCATABLE)))
- elf_elfheader (obfd)->e_flags |= EF_PPC_RELOCATABLE;
-
- /* Do not warn about eabi vs. V.4 mismatch, just or in the bit
- if any module uses it. */
- elf_elfheader (obfd)->e_flags |= (new_flags & EF_PPC_EMB);
-
- new_flags &= ~(EF_PPC_RELOCATABLE | EF_PPC_RELOCATABLE_LIB | EF_PPC_EMB);
- old_flags &= ~(EF_PPC_RELOCATABLE | EF_PPC_RELOCATABLE_LIB | EF_PPC_EMB);
-
- /* Warn about any other mismatches. */
- if (new_flags != old_flags)
- {
- error = true;
- (*_bfd_error_handler)
- (_("%s: uses different e_flags (0x%lx) fields than previous modules (0x%lx)"),
- bfd_archive_filename (ibfd), (long) new_flags, (long) old_flags);
- }
-
- if (error)
- {
- bfd_set_error (bfd_error_bad_value);
- return false;
- }
- }
-
- return true;
-}
-
-/* Handle a PowerPC specific section when reading an object file. This
- is called when elfcode.h finds a section with an unknown type. */
-
-static boolean
-ppc64_elf_section_from_shdr (abfd, hdr, name)
- bfd *abfd;
- Elf64_Internal_Shdr *hdr;
- char *name;
-{
- asection *newsect;
- flagword flags;
-
- if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name))
- return false;
-
- newsect = hdr->bfd_section;
- flags = bfd_get_section_flags (abfd, newsect);
- if (hdr->sh_flags & SHF_EXCLUDE)
- flags |= SEC_EXCLUDE;
-
- if (hdr->sh_type == SHT_ORDERED)
- flags |= SEC_SORT_ENTRIES;
-
- bfd_set_section_flags (abfd, newsect, flags);
return true;
}
@@ -1921,25 +1771,102 @@ struct ppc_dyn_relocs
selects between relative and absolute types. */
#define IS_ABSOLUTE_RELOC(RTYPE) \
- ((RTYPE) != R_PPC64_REL14 \
- && (RTYPE) != R_PPC64_REL14_BRNTAKEN \
- && (RTYPE) != R_PPC64_REL14_BRTAKEN \
- && (RTYPE) != R_PPC64_REL24 \
- && (RTYPE) != R_PPC64_REL32 \
- && (RTYPE) != R_PPC64_REL64)
+ ((RTYPE) != R_PPC64_REL32 \
+ && (RTYPE) != R_PPC64_REL64 \
+ && (RTYPE) != R_PPC64_ADDR30)
+
+/* Section name for stubs is the associated section name plus this
+ string. */
+#define STUB_SUFFIX ".stub"
+
+/* Linker stubs.
+ ppc_stub_long_branch:
+ Used when a 14 bit branch (or even a 24 bit branch) can't reach its
+ destination, but a 24 bit branch in a stub section will reach.
+ . b dest
+
+ ppc_stub_plt_branch:
+ Similar to the above, but a 24 bit branch in the stub section won't
+ reach its destination.
+ . addis %r12,%r2,xxx@ha
+ . ld %r11,xxx@l(%r12)
+ . mtctr %r11
+ . bctr
+
+ ppc_stub_plt_call:
+ Used to call a function in a shared library.
+ . addis %r12,%r2,xxx@ha
+ . std %r2,40(%r1)
+ . ld %r11,xxx+0@l(%r12)
+ . ld %r2,xxx+8@l(%r12)
+ . mtctr %r11
+ . ld %r11,xxx+16@l(%r12)
+ . bctr
+*/
+
+enum ppc_stub_type {
+ ppc_stub_none,
+ ppc_stub_long_branch,
+ ppc_stub_plt_branch,
+ ppc_stub_plt_call
+};
+
+struct ppc_stub_hash_entry {
+
+ /* Base hash table entry structure. */
+ struct bfd_hash_entry root;
-/* ppc64 ELF linker hash entry. */
+ /* The stub section. */
+ asection *stub_sec;
+
+ /* Offset within stub_sec of the beginning of this stub. */
+ bfd_vma stub_offset;
+
+ /* Given the symbol's value and its section we can determine its final
+ value when building the stubs (so the stub knows where to jump. */
+ bfd_vma target_value;
+ asection *target_section;
+
+ enum ppc_stub_type stub_type;
+
+ /* The symbol table entry, if any, that this was derived from. */
+ struct ppc_link_hash_entry *h;
+
+ /* Where this stub is being called from, or, in the case of combined
+ stub sections, the first input section in the group. */
+ asection *id_sec;
+};
+
+struct ppc_branch_hash_entry {
+
+ /* Base hash table entry structure. */
+ struct bfd_hash_entry root;
+
+ /* Offset within .branch_lt. */
+ unsigned int offset;
+
+ /* Generation marker. */
+ unsigned int iter;
+};
struct ppc_link_hash_entry
{
struct elf_link_hash_entry elf;
+ /* A pointer to the most recently used stub hash entry against this
+ symbol. */
+ struct ppc_stub_hash_entry *stub_cache;
+
/* Track dynamic relocs copied for this symbol. */
struct ppc_dyn_relocs *dyn_relocs;
+ /* Link between function code and descriptor symbols. */
+ struct elf_link_hash_entry *oh;
+
/* Flag function code and descriptor symbols. */
unsigned int is_func:1;
unsigned int is_func_descriptor:1;
+ unsigned int is_entry:1;
};
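[Editor's note: illustrative sketch, not part of the patch.  The stub sequences
described above assume the 64-bit PowerPC ELF function descriptor: each .opd
entry is 24 bytes, which is why ppc_stub_plt_call loads three doublewords at
offsets 0, 8 and 16, and why offsets into .opd are divided by 24 elsewhere in
this file.  A standalone picture of that layout, with uint64_t standing in for
bfd_vma:]

#include <stdint.h>

struct ppc64_opd_entry
{
  uint64_t entry;  /* Function entry point; the plt_call stub loads this
                      from xxx+0 into %r11 and branches via mtctr/bctr.  */
  uint64_t toc;    /* TOC base for the callee, loaded from xxx+8 into %r2.  */
  uint64_t env;    /* Environment pointer, loaded from xxx+16 into %r11.  */
};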
/* ppc64 ELF linker hash table. */
@@ -1948,6 +1875,32 @@ struct ppc_link_hash_table
{
struct elf_link_hash_table elf;
+ /* The stub hash table. */
+ struct bfd_hash_table stub_hash_table;
+
+ /* Another hash table for plt_branch stubs. */
+ struct bfd_hash_table branch_hash_table;
+
+ /* Linker stub bfd. */
+ bfd *stub_bfd;
+
+ /* Linker call-backs. */
+ asection * (*add_stub_section) PARAMS ((const char *, asection *));
+ void (*layout_sections_again) PARAMS ((void));
+
+ /* Array to keep track of which stub sections have been created, and
+ information on stub grouping. */
+ struct map_stub {
+ /* This is the section to which stubs in the group will be attached. */
+ asection *link_sec;
+ /* The stub section. */
+ asection *stub_sec;
+ } *stub_group;
+
+ /* Assorted information used by ppc64_elf_size_stubs. */
+ int top_index;
+ asection **input_list;
+
/* Short-cuts to get to dynamic linker sections. */
asection *sgot;
asection *srelgot;
@@ -1955,25 +1908,185 @@ struct ppc_link_hash_table
asection *srelplt;
asection *sdynbss;
asection *srelbss;
- asection *sstub;
asection *sglink;
asection *sfpr;
+ asection *sbrlt;
+ asection *srelbrlt;
/* Set on error. */
- int plt_overflow;
+ unsigned int stub_error;
+
+ /* Flag set when small branches are detected. Used to
+ select suitable defaults for the stub group size. */
+ unsigned int has_14bit_branch;
/* Set if we detect a reference undefined weak symbol. */
unsigned int have_undefweak;
+ /* Incremented every time we size stubs. */
+ unsigned int stub_iteration;
+
/* Small local sym to section mapping cache. */
struct sym_sec_cache sym_sec;
};
+static struct bfd_hash_entry *stub_hash_newfunc
+ PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
+static struct bfd_hash_entry *branch_hash_newfunc
+ PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
+static struct bfd_hash_entry *link_hash_newfunc
+ PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
+static struct bfd_link_hash_table *ppc64_elf_link_hash_table_create
+ PARAMS ((bfd *));
+static void ppc64_elf_link_hash_table_free
+ PARAMS ((struct bfd_link_hash_table *));
+static char *ppc_stub_name
+ PARAMS ((const asection *, const asection *,
+ const struct ppc_link_hash_entry *, const Elf_Internal_Rela *));
+static struct ppc_stub_hash_entry *ppc_get_stub_entry
+ PARAMS ((const asection *, const asection *, struct elf_link_hash_entry *,
+ const Elf_Internal_Rela *, struct ppc_link_hash_table *));
+static struct ppc_stub_hash_entry *ppc_add_stub
+ PARAMS ((const char *, asection *, struct ppc_link_hash_table *));
+static boolean create_linkage_sections
+ PARAMS ((bfd *, struct bfd_link_info *));
+static boolean create_got_section
+ PARAMS ((bfd *, struct bfd_link_info *));
+static boolean ppc64_elf_create_dynamic_sections
+ PARAMS ((bfd *, struct bfd_link_info *));
+static void ppc64_elf_copy_indirect_symbol
+ PARAMS ((struct elf_backend_data *, struct elf_link_hash_entry *,
+ struct elf_link_hash_entry *));
+static boolean ppc64_elf_check_relocs
+ PARAMS ((bfd *, struct bfd_link_info *, asection *,
+ const Elf_Internal_Rela *));
+static asection * ppc64_elf_gc_mark_hook
+ PARAMS ((asection *, struct bfd_link_info *, Elf_Internal_Rela *,
+ struct elf_link_hash_entry *, Elf_Internal_Sym *));
+static boolean ppc64_elf_gc_sweep_hook
+ PARAMS ((bfd *, struct bfd_link_info *, asection *,
+ const Elf_Internal_Rela *));
+static boolean func_desc_adjust
+ PARAMS ((struct elf_link_hash_entry *, PTR));
+static boolean ppc64_elf_func_desc_adjust
+ PARAMS ((bfd *, struct bfd_link_info *));
+static boolean ppc64_elf_adjust_dynamic_symbol
+ PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *));
+static void ppc64_elf_hide_symbol
+ PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *, boolean));
+static boolean edit_opd
+ PARAMS ((bfd *, struct bfd_link_info *));
+static boolean allocate_dynrelocs
+ PARAMS ((struct elf_link_hash_entry *, PTR));
+static boolean readonly_dynrelocs
+ PARAMS ((struct elf_link_hash_entry *, PTR));
+static enum elf_reloc_type_class ppc64_elf_reloc_type_class
+ PARAMS ((const Elf_Internal_Rela *));
+static boolean ppc64_elf_size_dynamic_sections
+ PARAMS ((bfd *, struct bfd_link_info *));
+static INLINE enum ppc_stub_type ppc_type_of_stub
+ PARAMS ((asection *, const Elf_Internal_Rela *,
+ struct ppc_link_hash_entry **, bfd_vma));
+static bfd_byte *build_plt_stub
+ PARAMS ((bfd *, bfd_byte *, int, int));
+static boolean ppc_build_one_stub
+ PARAMS ((struct bfd_hash_entry *, PTR));
+static boolean ppc_size_one_stub
+ PARAMS ((struct bfd_hash_entry *, PTR));
+static void group_sections
+ PARAMS ((struct ppc_link_hash_table *, bfd_size_type, boolean));
+static boolean ppc64_elf_relocate_section
+ PARAMS ((bfd *, struct bfd_link_info *info, bfd *, asection *, bfd_byte *,
+ Elf_Internal_Rela *relocs, Elf_Internal_Sym *local_syms,
+ asection **));
+static boolean ppc64_elf_finish_dynamic_symbol
+ PARAMS ((bfd *, struct bfd_link_info *, struct elf_link_hash_entry *,
+ Elf_Internal_Sym *));
+static boolean ppc64_elf_finish_dynamic_sections
+ PARAMS ((bfd *, struct bfd_link_info *));
+
/* Get the ppc64 ELF linker hash table from a link_info structure. */
#define ppc_hash_table(p) \
((struct ppc_link_hash_table *) ((p)->hash))
+#define ppc_stub_hash_lookup(table, string, create, copy) \
+ ((struct ppc_stub_hash_entry *) \
+ bfd_hash_lookup ((table), (string), (create), (copy)))
+
+#define ppc_branch_hash_lookup(table, string, create, copy) \
+ ((struct ppc_branch_hash_entry *) \
+ bfd_hash_lookup ((table), (string), (create), (copy)))
+
+/* Create an entry in the stub hash table. */
+
+static struct bfd_hash_entry *
+stub_hash_newfunc (entry, table, string)
+ struct bfd_hash_entry *entry;
+ struct bfd_hash_table *table;
+ const char *string;
+{
+ /* Allocate the structure if it has not already been allocated by a
+ subclass. */
+ if (entry == NULL)
+ {
+ entry = bfd_hash_allocate (table, sizeof (struct ppc_stub_hash_entry));
+ if (entry == NULL)
+ return entry;
+ }
+
+ /* Call the allocation method of the superclass. */
+ entry = bfd_hash_newfunc (entry, table, string);
+ if (entry != NULL)
+ {
+ struct ppc_stub_hash_entry *eh;
+
+ /* Initialize the local fields. */
+ eh = (struct ppc_stub_hash_entry *) entry;
+ eh->stub_sec = NULL;
+ eh->stub_offset = 0;
+ eh->target_value = 0;
+ eh->target_section = NULL;
+ eh->stub_type = ppc_stub_none;
+ eh->h = NULL;
+ eh->id_sec = NULL;
+ }
+
+ return entry;
+}
+
+/* Create an entry in the branch hash table. */
+
+static struct bfd_hash_entry *
+branch_hash_newfunc (entry, table, string)
+ struct bfd_hash_entry *entry;
+ struct bfd_hash_table *table;
+ const char *string;
+{
+ /* Allocate the structure if it has not already been allocated by a
+ subclass. */
+ if (entry == NULL)
+ {
+ entry = bfd_hash_allocate (table, sizeof (struct ppc_branch_hash_entry));
+ if (entry == NULL)
+ return entry;
+ }
+
+ /* Call the allocation method of the superclass. */
+ entry = bfd_hash_newfunc (entry, table, string);
+ if (entry != NULL)
+ {
+ struct ppc_branch_hash_entry *eh;
+
+ /* Initialize the local fields. */
+ eh = (struct ppc_branch_hash_entry *) entry;
+ eh->offset = 0;
+ eh->iter = 0;
+ }
+
+ return entry;
+}
+
/* Create an entry in a ppc64 ELF linker hash table. */
static struct bfd_hash_entry *
@@ -1997,9 +2110,12 @@ link_hash_newfunc (entry, table, string)
{
struct ppc_link_hash_entry *eh = (struct ppc_link_hash_entry *) entry;
+ eh->stub_cache = NULL;
eh->dyn_relocs = NULL;
+ eh->oh = NULL;
eh->is_func = 0;
eh->is_func_descriptor = 0;
+ eh->is_entry = 0;
}
return entry;
@@ -2014,32 +2130,209 @@ ppc64_elf_link_hash_table_create (abfd)
struct ppc_link_hash_table *htab;
bfd_size_type amt = sizeof (struct ppc_link_hash_table);
- htab = (struct ppc_link_hash_table *) bfd_alloc (abfd, amt);
+ htab = (struct ppc_link_hash_table *) bfd_malloc (amt);
if (htab == NULL)
return NULL;
if (! _bfd_elf_link_hash_table_init (&htab->elf, abfd, link_hash_newfunc))
{
- bfd_release (abfd, htab);
+ free (htab);
return NULL;
}
+ /* Init the stub hash table too. */
+ if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc))
+ return NULL;
+
+ /* And the branch hash table. */
+ if (!bfd_hash_table_init (&htab->branch_hash_table, branch_hash_newfunc))
+ return NULL;
+
+ htab->stub_bfd = NULL;
+ htab->add_stub_section = NULL;
+ htab->layout_sections_again = NULL;
+ htab->stub_group = NULL;
htab->sgot = NULL;
htab->srelgot = NULL;
htab->splt = NULL;
htab->srelplt = NULL;
htab->sdynbss = NULL;
htab->srelbss = NULL;
- htab->sstub = NULL;
htab->sglink = NULL;
htab->sfpr = NULL;
- htab->plt_overflow = 0;
+ htab->sbrlt = NULL;
+ htab->srelbrlt = NULL;
+ htab->stub_error = 0;
+ htab->has_14bit_branch = 0;
htab->have_undefweak = 0;
+ htab->stub_iteration = 0;
htab->sym_sec.abfd = NULL;
return &htab->elf.root;
}
+/* Free the derived linker hash table. */
+
+static void
+ppc64_elf_link_hash_table_free (hash)
+ struct bfd_link_hash_table *hash;
+{
+ struct ppc_link_hash_table *ret = (struct ppc_link_hash_table *) hash;
+
+ bfd_hash_table_free (&ret->stub_hash_table);
+ bfd_hash_table_free (&ret->branch_hash_table);
+ _bfd_generic_link_hash_table_free (hash);
+}
+
+/* Build a name for an entry in the stub hash table. */
+
+static char *
+ppc_stub_name (input_section, sym_sec, h, rel)
+ const asection *input_section;
+ const asection *sym_sec;
+ const struct ppc_link_hash_entry *h;
+ const Elf_Internal_Rela *rel;
+{
+ char *stub_name;
+ bfd_size_type len;
+
+ /* rel->r_addend is actually 64 bit, but who uses more than +/- 2^31
+ offsets from a sym as a branch target? In fact, we could
+ probably assume the addend is always zero. */
+ BFD_ASSERT (((int) rel->r_addend & 0xffffffff) == rel->r_addend);
+
+ if (h)
+ {
+ len = 8 + 1 + strlen (h->elf.root.root.string) + 1 + 8 + 1;
+ stub_name = bfd_malloc (len);
+ if (stub_name != NULL)
+ {
+ sprintf (stub_name, "%08x_%s+%x",
+ input_section->id & 0xffffffff,
+ h->elf.root.root.string,
+ (int) rel->r_addend & 0xffffffff);
+ }
+ }
+ else
+ {
+ len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
+ stub_name = bfd_malloc (len);
+ if (stub_name != NULL)
+ {
+ sprintf (stub_name, "%08x_%x:%x+%x",
+ input_section->id & 0xffffffff,
+ sym_sec->id & 0xffffffff,
+ (int) ELF64_R_SYM (rel->r_info) & 0xffffffff,
+ (int) rel->r_addend & 0xffffffff);
+ }
+ }
+ return stub_name;
+}
+
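[Editor's note: worked example, not part of the patch; the section ids and
symbol index are hypothetical.  ppc_stub_name above produces names such as
"0000001a_.printf+0" for a call to a global symbol and "0000001a_2c:7+8" for a
call through a local one:]

#include <stdio.h>

int
main (void)
{
  char buf[64];

  /* Global symbol: input section id 0x1a, symbol ".printf", addend 0.  */
  sprintf (buf, "%08x_%s+%x", 0x1au, ".printf", 0u);
  printf ("%s\n", buf);                 /* 0000001a_.printf+0 */

  /* Local symbol: target section id 0x2c, symbol index 7, addend 8.  */
  sprintf (buf, "%08x_%x:%x+%x", 0x1au, 0x2cu, 7u, 8u);
  printf ("%s\n", buf);                 /* 0000001a_2c:7+8 */
  return 0;
}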
+/* Look up an entry in the stub hash. Stub entries are cached because
+ creating the stub name takes a bit of time. */
+
+static struct ppc_stub_hash_entry *
+ppc_get_stub_entry (input_section, sym_sec, hash, rel, htab)
+ const asection *input_section;
+ const asection *sym_sec;
+ struct elf_link_hash_entry *hash;
+ const Elf_Internal_Rela *rel;
+ struct ppc_link_hash_table *htab;
+{
+ struct ppc_stub_hash_entry *stub_entry;
+ struct ppc_link_hash_entry *h = (struct ppc_link_hash_entry *) hash;
+ const asection *id_sec;
+
+ /* If this input section is part of a group of sections sharing one
+ stub section, then use the id of the first section in the group.
+ Stub names need to include a section id, as there may well be
+ more than one stub used to reach say, printf, and we need to
+ distinguish between them. */
+ id_sec = htab->stub_group[input_section->id].link_sec;
+
+ if (h != NULL && h->stub_cache != NULL
+ && h->stub_cache->h == h
+ && h->stub_cache->id_sec == id_sec)
+ {
+ stub_entry = h->stub_cache;
+ }
+ else
+ {
+ char *stub_name;
+
+ stub_name = ppc_stub_name (id_sec, sym_sec, h, rel);
+ if (stub_name == NULL)
+ return NULL;
+
+ stub_entry = ppc_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name, false, false);
+ if (h != NULL)
+ h->stub_cache = stub_entry;
+
+ free (stub_name);
+ }
+
+ return stub_entry;
+}
+
+/* Add a new stub entry to the stub hash. Not all fields of the new
+ stub entry are initialised. */
+
+static struct ppc_stub_hash_entry *
+ppc_add_stub (stub_name, section, htab)
+ const char *stub_name;
+ asection *section;
+ struct ppc_link_hash_table *htab;
+{
+ asection *link_sec;
+ asection *stub_sec;
+ struct ppc_stub_hash_entry *stub_entry;
+
+ link_sec = htab->stub_group[section->id].link_sec;
+ stub_sec = htab->stub_group[section->id].stub_sec;
+ if (stub_sec == NULL)
+ {
+ stub_sec = htab->stub_group[link_sec->id].stub_sec;
+ if (stub_sec == NULL)
+ {
+ size_t namelen;
+ bfd_size_type len;
+ char *s_name;
+
+ namelen = strlen (link_sec->name);
+ len = namelen + sizeof (STUB_SUFFIX);
+ s_name = bfd_alloc (htab->stub_bfd, len);
+ if (s_name == NULL)
+ return NULL;
+
+ memcpy (s_name, link_sec->name, namelen);
+ memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
+ stub_sec = (*htab->add_stub_section) (s_name, link_sec);
+ if (stub_sec == NULL)
+ return NULL;
+ htab->stub_group[link_sec->id].stub_sec = stub_sec;
+ }
+ htab->stub_group[section->id].stub_sec = stub_sec;
+ }
+
+ /* Enter this entry into the linker stub hash table. */
+ stub_entry = ppc_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+ true, false);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
+ bfd_archive_filename (section->owner),
+ stub_name);
+ return NULL;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+ return stub_entry;
+}
+
/* Create sections for linker generated code. */
static boolean
@@ -2055,24 +2348,38 @@ create_linkage_sections (dynobj, info)
/* Create .sfpr for code to save and restore fp regs. */
flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
| SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED);
- htab->sfpr = bfd_make_section (dynobj, ".sfpr");
+ htab->sfpr = bfd_make_section_anyway (dynobj, ".sfpr");
if (htab->sfpr == NULL
|| ! bfd_set_section_flags (dynobj, htab->sfpr, flags)
|| ! bfd_set_section_alignment (dynobj, htab->sfpr, 2))
return false;
- /* Create .stub and .glink for global linkage functions. */
- htab->sstub = bfd_make_section (dynobj, ".stub");
- if (htab->sstub == NULL
- || ! bfd_set_section_flags (dynobj, htab->sstub, flags)
- || ! bfd_set_section_alignment (dynobj, htab->sstub, 2))
- return false;
- htab->sglink = bfd_make_section (dynobj, ".glink");
+ /* Create .glink for lazy dynamic linking support. */
+ htab->sglink = bfd_make_section_anyway (dynobj, ".glink");
if (htab->sglink == NULL
|| ! bfd_set_section_flags (dynobj, htab->sglink, flags)
|| ! bfd_set_section_alignment (dynobj, htab->sglink, 2))
return false;
+ /* Create .branch_lt for plt_branch stubs. */
+ flags = (SEC_ALLOC | SEC_LOAD
+ | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED);
+ htab->sbrlt = bfd_make_section_anyway (dynobj, ".branch_lt");
+ if (htab->sbrlt == NULL
+ || ! bfd_set_section_flags (dynobj, htab->sbrlt, flags)
+ || ! bfd_set_section_alignment (dynobj, htab->sbrlt, 3))
+ return false;
+
+ if (info->shared)
+ {
+ flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
+ | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED);
+ htab->srelbrlt = bfd_make_section_anyway (dynobj, ".rela.branch_lt");
+ if (!htab->srelbrlt
+ || ! bfd_set_section_flags (dynobj, htab->srelbrlt, flags)
+ || ! bfd_set_section_alignment (dynobj, htab->srelbrlt, 3))
+ return false;
+ }
return true;
}
@@ -2137,7 +2444,8 @@ ppc64_elf_create_dynamic_sections (dynobj, info)
/* Copy the extra info we tack onto an elf_link_hash_entry. */
static void
-ppc64_elf_copy_indirect_symbol (dir, ind)
+ppc64_elf_copy_indirect_symbol (bed, dir, ind)
+ struct elf_backend_data *bed;
struct elf_link_hash_entry *dir, *ind;
{
struct ppc_link_hash_entry *edir, *eind;
@@ -2181,8 +2489,31 @@ ppc64_elf_copy_indirect_symbol (dir, ind)
edir->is_func |= eind->is_func;
edir->is_func_descriptor |= eind->is_func_descriptor;
+ edir->is_entry |= eind->is_entry;
- _bfd_elf_link_hash_copy_indirect (dir, ind);
+ _bfd_elf_link_hash_copy_indirect (bed, dir, ind);
+}
+
+/* Set a flag, used by ppc64_elf_gc_mark_hook, on the entry symbol and
+ symbols undefined on the command-line. */
+
+boolean
+ppc64_elf_mark_entry_syms (info)
+ struct bfd_link_info *info;
+{
+ struct ppc_link_hash_table *htab;
+ struct bfd_sym_chain *sym;
+
+ htab = ppc_hash_table (info);
+ for (sym = info->gc_sym_list; sym; sym = sym->next)
+ {
+ struct elf_link_hash_entry *h;
+
+ h = elf_link_hash_lookup (&htab->elf, sym->name, false, false, false);
+ if (h != NULL)
+ ((struct ppc_link_hash_entry *) h)->is_entry = 1;
+ }
+ return true;
}
/* Look through the relocs for a section during the first phase, and
@@ -2202,7 +2533,7 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
asection *sreloc;
- boolean is_opd;
+ asection **opd_sym_map;
if (info->relocateable)
return true;
@@ -2212,12 +2543,34 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
sym_hashes = elf_sym_hashes (abfd);
sym_hashes_end = (sym_hashes
- + symtab_hdr->sh_size / sizeof (Elf64_External_Sym));
- if (!elf_bad_symtab (abfd))
- sym_hashes_end -= symtab_hdr->sh_info;
+ + symtab_hdr->sh_size / sizeof (Elf64_External_Sym)
+ - symtab_hdr->sh_info);
sreloc = NULL;
- is_opd = strcmp (bfd_get_section_name (abfd, sec), ".opd") == 0;
+ opd_sym_map = NULL;
+ if (strcmp (bfd_get_section_name (abfd, sec), ".opd") == 0)
+ {
+ /* Garbage collection needs some extra help with .opd sections.
+ We don't want to necessarily keep everything referenced by
+ relocs in .opd, as that would keep all functions. Instead,
+ if we reference an .opd symbol (a function descriptor), we
+ want to keep the function code symbol's section. This is
+ easy for global symbols, but for local syms we need to keep
+ information about the associated function section. Later, if
+ edit_opd deletes entries, we'll use this array to adjust
+ local syms in .opd. */
+ union opd_info {
+ asection *func_section;
+ long entry_adjust;
+ };
+ bfd_size_type amt;
+
+ amt = sec->_raw_size * sizeof (union opd_info) / 24;
+ opd_sym_map = (asection **) bfd_zalloc (abfd, amt);
+ if (opd_sym_map == NULL)
+ return false;
+ elf_section_data (sec)->tdata = opd_sym_map;
+ }
if (htab->elf.dynobj == NULL)
htab->elf.dynobj = abfd;
@@ -2286,14 +2639,14 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
case R_PPC64_PLT32:
case R_PPC64_PLT64:
/* This symbol requires a procedure linkage table entry. We
- actually build the entry in adjust_dynamic_symbol,
- because this might be a case of linking PIC code without
- linking in any dynamic objects, in which case we don't
- need to generate a procedure linkage table after all. */
+ actually build the entry in adjust_dynamic_symbol,
+ because this might be a case of linking PIC code without
+ linking in any dynamic objects, in which case we don't
+ need to generate a procedure linkage table after all. */
if (h == NULL)
{
/* It does not make sense to have a procedure linkage
- table entry for a local symbol. */
+ table entry for a local symbol. */
bfd_set_error (bfd_error_bad_value);
return false;
}
@@ -2334,6 +2687,12 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
return false;
break;
+ case R_PPC64_REL14:
+ case R_PPC64_REL14_BRTAKEN:
+ case R_PPC64_REL14_BRNTAKEN:
+ htab->has_14bit_branch = 1;
+ /* Fall through. */
+
case R_PPC64_REL24:
if (h != NULL
&& h->root.root.string[0] == '.'
@@ -2348,7 +2707,7 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
break;
case R_PPC64_ADDR64:
- if (is_opd
+ if (opd_sym_map != NULL
&& h != NULL
&& h->root.root.string[0] == '.'
&& h->root.root.string[1] != 0)
@@ -2359,22 +2718,31 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
false, false, false);
if (fdh != NULL)
{
- /* Ensure the function descriptor symbol string is
- part of the code symbol string. We aren't
- changing the name here, just allowing some tricks
- in ppc64_elf_hide_symbol. */
- fdh->root.root.string = h->root.root.string + 1;
((struct ppc_link_hash_entry *) fdh)->is_func_descriptor = 1;
+ ((struct ppc_link_hash_entry *) fdh)->oh = h;
((struct ppc_link_hash_entry *) h)->is_func = 1;
+ ((struct ppc_link_hash_entry *) h)->oh = fdh;
}
}
+ if (opd_sym_map != NULL
+ && h == NULL
+ && rel + 1 < rel_end
+ && ((enum elf_ppc_reloc_type) ELF64_R_TYPE ((rel + 1)->r_info)
+ == R_PPC64_TOC))
+ {
+ asection *s;
+
+ s = bfd_section_from_r_symndx (abfd, &htab->sym_sec, sec,
+ r_symndx);
+ if (s == NULL)
+ return false;
+ else if (s != sec)
+ opd_sym_map[rel->r_offset / 24] = s;
+ }
/* Fall through. */
case R_PPC64_REL64:
case R_PPC64_REL32:
- case R_PPC64_REL14:
- case R_PPC64_REL14_BRTAKEN:
- case R_PPC64_REL14_BRNTAKEN:
case R_PPC64_ADDR14:
case R_PPC64_ADDR14_BRNTAKEN:
case R_PPC64_ADDR14_BRTAKEN:
@@ -2396,7 +2764,7 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
case R_PPC64_UADDR64:
case R_PPC64_TOC:
/* Don't propagate .opd relocs. */
- if (NO_OPD_RELOCS && is_opd)
+ if (NO_OPD_RELOCS && opd_sym_map != NULL)
break;
/* If we are creating a shared library, and this is a reloc
@@ -2537,16 +2905,19 @@ ppc64_elf_check_relocs (abfd, info, sec, relocs)
relocation. */
static asection *
-ppc64_elf_gc_mark_hook (abfd, info, rel, h, sym)
- bfd *abfd;
+ppc64_elf_gc_mark_hook (sec, info, rel, h, sym)
+ asection *sec;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
Elf_Internal_Rela *rel;
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
{
+ asection *rsec = NULL;
+
if (h != NULL)
{
enum elf_ppc_reloc_type r_type;
+ struct ppc_link_hash_entry *fdh;
r_type = (enum elf_ppc_reloc_type) ELF64_R_TYPE (rel->r_info);
switch (r_type)
@@ -2560,10 +2931,26 @@ ppc64_elf_gc_mark_hook (abfd, info, rel, h, sym)
{
case bfd_link_hash_defined:
case bfd_link_hash_defweak:
- return h->root.u.def.section;
+ fdh = (struct ppc_link_hash_entry *) h;
+
+ /* Function descriptor syms cause the associated
+ function code sym section to be marked. */
+ if (fdh->is_func_descriptor)
+ rsec = fdh->oh->root.u.def.section;
+
+ /* Function entry syms return NULL if they are in .opd
+ and are not ._start (or others undefined on the ld
+ command line). Thus we avoid marking all function
+ sections, as all functions are referenced in .opd. */
+ else if ((fdh->oh != NULL
+ && ((struct ppc_link_hash_entry *) fdh->oh)->is_entry)
+ || elf_section_data (sec)->tdata == NULL)
+ rsec = h->root.u.def.section;
+ break;
case bfd_link_hash_common:
- return h->root.u.c.p->section;
+ rsec = h->root.u.c.p->section;
+ break;
default:
break;
@@ -2572,10 +2959,17 @@ ppc64_elf_gc_mark_hook (abfd, info, rel, h, sym)
}
else
{
- return bfd_section_from_elf_index (abfd, sym->st_shndx);
+ asection **opd_sym_section;
+
+ rsec = bfd_section_from_elf_index (sec->owner, sym->st_shndx);
+ opd_sym_section = (asection **) elf_section_data (rsec)->tdata;
+ if (opd_sym_section != NULL)
+ rsec = opd_sym_section[sym->st_value / 24];
+ else if (elf_section_data (sec)->tdata != NULL)
+ rsec = NULL;
}
- return NULL;
+ return rsec;
}
/* Update the .got, .plt. and dynamic reloc reference counts for the
@@ -2642,6 +3036,9 @@ ppc64_elf_gc_sweep_hook (abfd, info, sec, relocs)
}
break;
+ case R_PPC64_REL14:
+ case R_PPC64_REL14_BRNTAKEN:
+ case R_PPC64_REL14_BRTAKEN:
case R_PPC64_REL24:
if (r_symndx >= symtab_hdr->sh_info)
{
@@ -2651,9 +3048,6 @@ ppc64_elf_gc_sweep_hook (abfd, info, sec, relocs)
}
break;
- case R_PPC64_REL14:
- case R_PPC64_REL14_BRNTAKEN:
- case R_PPC64_REL14_BRTAKEN:
case R_PPC64_REL32:
case R_PPC64_REL64:
if (r_symndx >= symtab_hdr->sh_info)
@@ -2758,14 +3152,15 @@ func_desc_adjust (h, inf)
&& h->root.root.string[0] == '.'
&& h->root.root.string[1] != '\0')
{
- struct elf_link_hash_entry *fdh;
+ struct elf_link_hash_entry *fdh = ((struct ppc_link_hash_entry *) h)->oh;
boolean force_local;
/* Find the corresponding function descriptor symbol. Create it
as undefined if necessary. */
- fdh = elf_link_hash_lookup (&htab->elf, h->root.root.string + 1,
- false, false, true);
+ if (fdh == NULL)
+ fdh = elf_link_hash_lookup (&htab->elf, h->root.root.string + 1,
+ false, false, true);
if (fdh == NULL
&& info->shared
@@ -2814,7 +3209,8 @@ func_desc_adjust (h, inf)
fdh->elf_link_hash_flags |= ELF_LINK_HASH_NEEDS_PLT;
}
((struct ppc_link_hash_entry *) fdh)->is_func_descriptor = 1;
- fdh->root.root.string = h->root.root.string + 1;
+ ((struct ppc_link_hash_entry *) fdh)->oh = h;
+ ((struct ppc_link_hash_entry *) h)->oh = fdh;
}
/* Now that the info is on the function descriptor, clear the
@@ -2824,8 +3220,14 @@ func_desc_adjust (h, inf)
been imported from another library. Function code syms that
are really in the library we must leave global to prevent the
linker dragging in a definition from a static library. */
- force_local = ((h->elf_link_hash_flags & ELF_LINK_HASH_DEF_REGULAR) == 0
- && info->shared);
+ force_local = (info->shared
+ && ((h->elf_link_hash_flags
+ & ELF_LINK_HASH_DEF_REGULAR) == 0
+ || fdh == NULL
+ || (fdh->elf_link_hash_flags
+ & ELF_LINK_HASH_DEF_REGULAR) == 0
+ || (fdh->elf_link_hash_flags
+ & ELF_LINK_FORCED_LOCAL) != 0));
_bfd_elf_link_hash_hide_symbol (info, h, force_local);
}
@@ -2947,7 +3349,6 @@ ppc64_elf_func_desc_adjust (obfd, info)
bfd_put_32 (htab->elf.dynobj, BLR, p);
}
- elf_link_hash_traverse (&htab->elf, func_desc_adjust, (PTR) info);
return true;
}
@@ -3091,18 +3492,362 @@ ppc64_elf_hide_symbol (info, h, force_local)
if (((struct ppc_link_hash_entry *) h)->is_func_descriptor)
{
- const char *name;
- struct elf_link_hash_entry *fh;
- struct ppc_link_hash_table *htab;
+ struct elf_link_hash_entry *fh = ((struct ppc_link_hash_entry *) h)->oh;
- name = h->root.root.string - 1;
- htab = ppc_hash_table (info);
- fh = elf_link_hash_lookup (&htab->elf, name, false, false, false);
+ if (fh == NULL)
+ {
+ const char *p, *q;
+ struct ppc_link_hash_table *htab;
+ char save;
+
+ /* We aren't supposed to use alloca in BFD because on
+ systems which do not have alloca the version in libiberty
+ calls xmalloc, which might cause the program to crash
+ when it runs out of memory. This function doesn't have a
+ return status, so there's no way to gracefully return an
+ error. So cheat. We know that string[-1] can be safely
+ dereferenced; It's either a string in an ELF string
+ table, or allocated in an objalloc structure. */
+
+ p = h->root.root.string - 1;
+ save = *p;
+ *(char *) p = '.';
+ htab = ppc_hash_table (info);
+ fh = elf_link_hash_lookup (&htab->elf, p, false, false, false);
+ *(char *) p = save;
+
+ /* Unfortunately, if it so happens that the string we were
+ looking for was allocated immediately before this string,
+ then we overwrote the string terminator. That's the only
+ reason the lookup should fail. */
+ if (fh == NULL)
+ {
+ q = h->root.root.string + strlen (h->root.root.string);
+ while (q >= h->root.root.string && *q == *p)
+ --q, --p;
+ if (q < h->root.root.string && *p == '.')
+ fh = elf_link_hash_lookup (&htab->elf, p, false, false, false);
+ }
+ if (fh != NULL)
+ {
+ ((struct ppc_link_hash_entry *) h)->oh = fh;
+ ((struct ppc_link_hash_entry *) fh)->oh = h;
+ }
+ }
if (fh != NULL)
_bfd_elf_link_hash_hide_symbol (info, fh, force_local);
}
}
+static boolean
+edit_opd (obfd, info)
+ bfd *obfd;
+ struct bfd_link_info *info;
+{
+ bfd *ibfd;
+ unsigned int bfd_indx;
+
+ for (bfd_indx = 0, ibfd = info->input_bfds;
+ ibfd != NULL;
+ ibfd = ibfd->link_next, bfd_indx++)
+ {
+ asection *sec;
+ Elf_Internal_Rela *relstart, *rel, *relend;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Sym *local_syms;
+ struct elf_link_hash_entry **sym_hashes;
+ bfd_vma offset;
+ long *adjust;
+ boolean need_edit;
+
+ sec = bfd_get_section_by_name (ibfd, ".opd");
+ if (sec == NULL)
+ continue;
+
+ adjust = (long *) elf_section_data (sec)->tdata;
+ BFD_ASSERT (adjust != NULL);
+ memset (adjust, 0, (size_t) sec->_raw_size * sizeof (long) / 24);
+
+ if (sec->output_section == bfd_abs_section_ptr)
+ continue;
+
+ /* Look through the section relocs. */
+ if ((sec->flags & SEC_RELOC) == 0 || sec->reloc_count == 0)
+ continue;
+
+ local_syms = NULL;
+ symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
+ sym_hashes = elf_sym_hashes (ibfd);
+
+ /* Read the relocations. */
+ relstart = _bfd_elf64_link_read_relocs (obfd, sec, (PTR) NULL,
+ (Elf_Internal_Rela *) NULL,
+ info->keep_memory);
+ if (relstart == NULL)
+ return false;
+
+ /* First run through the relocs to check they are sane, and to
+ determine whether we need to edit this opd section. */
+ need_edit = false;
+ offset = 0;
+ relend = relstart + sec->reloc_count;
+ for (rel = relstart; rel < relend; rel++)
+ {
+ enum elf_ppc_reloc_type r_type;
+ unsigned long r_symndx;
+ asection *sym_sec;
+ struct elf_link_hash_entry *h;
+ Elf_Internal_Sym *sym;
+
+ /* .opd contains a regular array of 24 byte entries. We're
+ only interested in the reloc pointing to a function entry
+ point. */
+ r_type = (enum elf_ppc_reloc_type) ELF64_R_TYPE (rel->r_info);
+ if (r_type == R_PPC64_TOC)
+ continue;
+
+ if (r_type != R_PPC64_ADDR64)
+ {
+ (*_bfd_error_handler)
+ (_("%s: unexpected reloc type %u in .opd section"),
+ bfd_archive_filename (ibfd), r_type);
+ need_edit = false;
+ break;
+ }
+
+ if (rel + 1 >= relend)
+ continue;
+ r_type = (enum elf_ppc_reloc_type) ELF64_R_TYPE ((rel + 1)->r_info);
+ if (r_type != R_PPC64_TOC)
+ continue;
+
+ if (rel->r_offset != offset)
+ {
+ /* If someone messes with .opd alignment then after a
+ "ld -r" we might have padding in the middle of .opd.
+ Also, there's nothing to prevent someone putting
+ something silly in .opd with the assembler. No .opd
+ optimization for them! */
+ (*_bfd_error_handler)
+ (_("%s: .opd is not a regular array of opd entries"),
+ bfd_archive_filename (ibfd));
+ need_edit = false;
+ break;
+ }
+
+ r_symndx = ELF64_R_SYM (rel->r_info);
+ sym_sec = NULL;
+ h = NULL;
+ sym = NULL;
+ if (r_symndx >= symtab_hdr->sh_info)
+ {
+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ sym_sec = h->root.u.def.section;
+ }
+ else
+ {
+ if (local_syms == NULL)
+ {
+ local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (local_syms == NULL)
+ local_syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
+ symtab_hdr->sh_info, 0,
+ NULL, NULL, NULL);
+ if (local_syms == NULL)
+ goto error_free_rel;
+ }
+ sym = local_syms + r_symndx;
+ if ((sym->st_shndx != SHN_UNDEF
+ && sym->st_shndx < SHN_LORESERVE)
+ || sym->st_shndx > SHN_HIRESERVE)
+ sym_sec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
+ }
+
+ if (sym_sec == NULL || sym_sec->owner == NULL)
+ {
+ (*_bfd_error_handler)
+ (_("%s: undefined sym `%s' in .opd section"),
+ bfd_archive_filename (ibfd),
+ h != NULL ? h->root.root.string : "<local symbol>");
+ need_edit = false;
+ break;
+ }
+
+ /* opd entries are always for functions defined in the
+ current input bfd. If the symbol isn't defined in the
+ input bfd, then we won't be using the function in this
+ bfd; It must be defined in a linkonce section in another
+ bfd, or is weak. It's also possible that we are
+ discarding the function due to a linker script /DISCARD/,
+ which we test for via the output_section. */
+ if (sym_sec->owner != ibfd
+ || sym_sec->output_section == bfd_abs_section_ptr)
+ need_edit = true;
+
+ offset += 24;
+ }
+
+ if (need_edit)
+ {
+ Elf_Internal_Rela *write_rel;
+ bfd_byte *rptr, *wptr;
+ boolean skip;
+
+ /* This seems a waste of time as input .opd sections are all
+ zeros as generated by gcc, but I suppose there's no reason
+ this will always be so. We might start putting something in
+ the third word of .opd entries. */
+ if ((sec->flags & SEC_IN_MEMORY) == 0)
+ {
+ bfd_byte *loc = bfd_alloc (ibfd, sec->_raw_size);
+ if (loc == NULL
+ || !bfd_get_section_contents (ibfd, sec, loc, (bfd_vma) 0,
+ sec->_raw_size))
+ {
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ free (local_syms);
+ error_free_rel:
+ if (elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+ return false;
+ }
+ sec->contents = loc;
+ sec->flags |= (SEC_IN_MEMORY | SEC_HAS_CONTENTS);
+ }
+
+ elf_section_data (sec)->relocs = relstart;
+
+ wptr = sec->contents;
+ rptr = sec->contents;
+ write_rel = relstart;
+ skip = false;
+ offset = 0;
+ for (rel = relstart; rel < relend; rel++)
+ {
+ if (rel->r_offset == offset)
+ {
+ unsigned long r_symndx;
+ asection *sym_sec;
+ struct elf_link_hash_entry *h;
+ Elf_Internal_Sym *sym;
+
+ r_symndx = ELF64_R_SYM (rel->r_info);
+ sym_sec = NULL;
+ h = NULL;
+ sym = NULL;
+ if (r_symndx >= symtab_hdr->sh_info)
+ {
+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ sym_sec = h->root.u.def.section;
+ }
+ else
+ {
+ sym = local_syms + r_symndx;
+ if ((sym->st_shndx != SHN_UNDEF
+ && sym->st_shndx < SHN_LORESERVE)
+ || sym->st_shndx > SHN_HIRESERVE)
+ sym_sec = bfd_section_from_elf_index (ibfd,
+ sym->st_shndx);
+ }
+
+ skip = (sym_sec->owner != ibfd
+ || sym_sec->output_section == bfd_abs_section_ptr);
+ if (skip)
+ {
+ if (h != NULL && sym_sec->owner == ibfd)
+ {
+ /* Arrange for the function descriptor sym
+ to be dropped. */
+ struct elf_link_hash_entry *fdh;
+ struct ppc_link_hash_entry *fh;
+
+ fh = (struct ppc_link_hash_entry *) h;
+ BFD_ASSERT (fh->is_func);
+ fdh = fh->oh;
+ fdh->root.u.def.value = 0;
+ fdh->root.u.def.section = sym_sec;
+ }
+ }
+ else
+ {
+ /* We'll be keeping this opd entry. */
+
+ if (h != NULL)
+ {
+ /* Redefine the function descriptor symbol
+ to this location in the opd section.
+ We've checked above that opd relocs are
+ ordered. */
+ struct elf_link_hash_entry *fdh;
+ struct ppc_link_hash_entry *fh;
+
+ fh = (struct ppc_link_hash_entry *) h;
+ BFD_ASSERT (fh->is_func);
+ fdh = fh->oh;
+ fdh->root.u.def.value = wptr - sec->contents;
+ }
+ else
+ {
+ /* Local syms are a bit tricky. We could
+ tweak them as they can be cached, but
+ we'd need to look through the local syms
+ for the function descriptor sym which we
+ don't have at the moment. So keep an
+ array of adjustments. */
+ adjust[(rel->r_offset + wptr - rptr) / 24]
+ = wptr - rptr;
+ }
+
+ if (wptr != rptr)
+ memcpy (wptr, rptr, 24);
+ wptr += 24;
+ }
+ rptr += 24;
+ offset += 24;
+ }
+
+ /* We need to adjust any reloc offsets to point to the
+ new opd entries. While we're at it, we may as well
+ remove redundant relocs. */
+ if (!skip)
+ {
+ rel->r_offset += wptr - rptr;
+ if (write_rel != rel)
+ memcpy (write_rel, rel, sizeof (*rel));
+ ++write_rel;
+ }
+ }
+
+ sec->_cooked_size = wptr - sec->contents;
+ sec->reloc_count = write_rel - relstart;
+ }
+ else if (elf_section_data (sec)->relocs != relstart)
+ free (relstart);
+
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ {
+ if (!info->keep_memory)
+ free (local_syms);
+ else
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
+ }
+
+ return true;
+}
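A minimal sketch of the compaction the loop above performs, on a plain byte buffer rather than a bfd section: dead 24-byte .opd entries are squeezed out and an adjustment array records how far each surviving entry moved, so that symbols pointing into .opd can be fixed up later (relocate_section applies opd_sym_adjust[sym->st_value / 24]). Names are hypothetical and the adjustment is indexed by the original entry number for simplicity; the patch computes its index from the reloc offset and the write pointer.

#include <stddef.h>
#include <string.h>

/* Sketch only, not the bfd code: compact an .opd image of SIZE bytes made
   of 24-byte entries, dropping entries flagged in DEAD[], and record in
   ADJUST[i] how many bytes entry i moved (zero or negative).  A symbol
   whose value was i * 24 is later adjusted by ADJUST[i].  Returns the
   new size.  */
static size_t
compact_opd (unsigned char *opd, size_t size, const int *dead, long *adjust)
{
  size_t rptr, wptr = 0, i = 0;

  for (rptr = 0; rptr < size; rptr += 24, i++)
    {
      if (dead[i])
        continue;                              /* Entry discarded.  */
      adjust[i] = (long) wptr - (long) rptr;   /* Displacement of entry i.  */
      if (wptr != rptr)
        memmove (opd + wptr, opd + rptr, 24);
      wptr += 24;
    }
  return wptr;
}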
+
/* This is the condition under which ppc64_elf_finish_dynamic_symbol
will be called from elflink.h. If elflink.h doesn't call our
finish_dynamic_symbol routine, we'll need to do something about
@@ -3156,10 +3901,7 @@ allocate_dynrelocs (h, inf)
/* Make room for this entry. */
s->_raw_size += PLT_ENTRY_SIZE;
- /* Make room for the .stub and .glink code. */
- s = htab->sstub;
- s->_raw_size += PLT_CALL_STUB_SIZE;
-
+ /* Make room for the .glink code. */
s = htab->sglink;
if (s->_raw_size == 0)
s->_raw_size += GLINK_CALL_STUB_SIZE;
@@ -3404,6 +4146,9 @@ ppc64_elf_size_dynamic_sections (output_bfd, info)
}
}
+ if (!edit_opd (output_bfd, info))
+ return false;
+
/* Allocate global sym .plt and .got entries, and space for global
sym dynamic relocs. */
elf_link_hash_traverse (&htab->elf, allocate_dynrelocs, (PTR) info);
@@ -3413,15 +4158,15 @@ ppc64_elf_size_dynamic_sections (output_bfd, info)
relocs = false;
for (s = dynobj->sections; s != NULL; s = s->next)
{
- bfd_vma size;
-
if ((s->flags & SEC_LINKER_CREATED) == 0)
continue;
- if (s == htab->splt
- || s == htab->sgot
- || s == htab->sstub
- || s == htab->sglink)
+ if (s == htab->sbrlt || s == htab->srelbrlt)
+ /* These haven't been allocated yet; don't strip. */
+ continue;
+ else if (s == htab->splt
+ || s == htab->sgot
+ || s == htab->sglink)
{
/* Strip this section if we don't need it; see the
comment below. */
@@ -3462,18 +4207,16 @@ ppc64_elf_size_dynamic_sections (output_bfd, info)
continue;
}
+ /* .plt is in the bss section. We don't initialise it. */
+ if ((s->flags & SEC_LOAD) == 0)
+ continue;
+
/* Allocate memory for the section contents. We use bfd_zalloc
here in case unused entries are not reclaimed before the
section's contents are written out. This should not happen,
but this way if it does, we get a R_PPC64_NONE reloc instead
of garbage. */
- size = s->_raw_size;
- if (s == htab->sstub)
- {
- /* .stub may grow. Allocate enough for the maximum growth. */
- size += (size + 65536 + 65535) / 65536 * 4;
- }
- s->contents = (bfd_byte *) bfd_zalloc (dynobj, size);
+ s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->_raw_size);
if (s->contents == NULL)
return false;
}
@@ -3494,7 +4237,7 @@ ppc64_elf_size_dynamic_sections (output_bfd, info)
return false;
}
- if (htab->splt->_raw_size != 0)
+ if (htab->splt != NULL && htab->splt->_raw_size != 0)
{
if (!add_dynamic_entry (DT_PLTGOT, 0)
|| !add_dynamic_entry (DT_PLTRELSZ, 0)
@@ -3536,116 +4279,55 @@ ppc64_elf_size_dynamic_sections (output_bfd, info)
return true;
}
-/* Called after we have determined section placement. If sections
- move, we'll be called again. Provide a value for TOCstart. */
+/* Determine the type of stub needed, if any, for a call. */
-bfd_vma
-ppc64_elf_toc (obfd)
- bfd *obfd;
+static INLINE enum ppc_stub_type
+ppc_type_of_stub (input_sec, rel, hash, destination)
+ asection *input_sec;
+ const Elf_Internal_Rela *rel;
+ struct ppc_link_hash_entry **hash;
+ bfd_vma destination;
{
- asection *s;
- bfd_vma TOCstart;
+ struct ppc_link_hash_entry *h = *hash;
+ bfd_vma location;
+ bfd_vma branch_offset;
+ bfd_vma max_branch_offset;
+ unsigned int r_type;
- /* The TOC consists of sections .got, .toc, .tocbss, .plt in that
- order. The TOC starts where the first of these sections starts. */
- s = bfd_get_section_by_name (obfd, ".got");
- if (s == NULL)
- s = bfd_get_section_by_name (obfd, ".toc");
- if (s == NULL)
- s = bfd_get_section_by_name (obfd, ".tocbss");
- if (s == NULL)
- s = bfd_get_section_by_name (obfd, ".plt");
- if (s == NULL)
+ if (h != NULL)
{
- /* This may happen for
- o references to TOC base (SYM@toc / TOC[tc0]) without a
- .toc directive
- o bad linker script
- o --gc-sections and empty TOC sections
-
- FIXME: Warn user? */
+ if (h->oh != NULL
+ && h->oh->plt.offset != (bfd_vma) -1
+ && h->oh->dynindx != -1)
+ {
+ *hash = (struct ppc_link_hash_entry *) h->oh;
+ return ppc_stub_plt_call;
+ }
- /* Look for a likely section. We probably won't even be
- using TOCstart. */
- for (s = obfd->sections; s != NULL; s = s->next)
- if ((s->flags & (SEC_ALLOC | SEC_SMALL_DATA | SEC_READONLY))
- == (SEC_ALLOC | SEC_SMALL_DATA))
- break;
- if (s == NULL)
- for (s = obfd->sections; s != NULL; s = s->next)
- if ((s->flags & (SEC_ALLOC | SEC_SMALL_DATA))
- == (SEC_ALLOC | SEC_SMALL_DATA))
- break;
- if (s == NULL)
- for (s = obfd->sections; s != NULL; s = s->next)
- if ((s->flags & (SEC_ALLOC | SEC_READONLY)) == SEC_ALLOC)
- break;
- if (s == NULL)
- for (s = obfd->sections; s != NULL; s = s->next)
- if ((s->flags & SEC_ALLOC) == SEC_ALLOC)
- break;
+ if (h->elf.root.type == bfd_link_hash_undefweak
+ || h->elf.root.type == bfd_link_hash_undefined)
+ return ppc_stub_none;
}
- TOCstart = 0;
- if (s != NULL)
- TOCstart = s->output_section->vma + s->output_offset;
-
- return TOCstart;
-}
-
-/* PowerPC64 .plt entries are 24 bytes long, which doesn't divide
- evenly into 64k. Sometimes with a large enough .plt, we'll need to
- use offsets differing in the high 16 bits when accessing a .plt
- entry from a .plt call stub. This function adjusts the size of
- .stub to accommodate the extra stub instruction needed in such
- cases. */
-
-boolean
-ppc64_elf_size_stubs (obfd, info, changed)
- bfd *obfd;
- struct bfd_link_info *info;
- int *changed;
-{
- struct ppc_link_hash_table *htab = ppc_hash_table (info);
- bfd_vma plt_offset, next_64k;
- long base, num, extra;
-
- /* .plt and .stub should be both present, or both absent. */
- if ((htab->splt == NULL || htab->splt->_raw_size == 0)
- != (htab->sstub == NULL || htab->sstub->_raw_size == 0))
- abort ();
+ /* Determine where the call point is. */
+ location = (input_sec->output_offset
+ + input_sec->output_section->vma
+ + rel->r_offset);
- /* If no .plt, then nothing to do. */
- if (htab->splt == NULL || htab->splt->_raw_size == 0)
- return true;
+ branch_offset = destination - location;
+ r_type = ELF64_R_TYPE (rel->r_info);
- plt_offset = (htab->splt->output_section->vma
- + htab->splt->output_offset
- - elf_gp (obfd));
- next_64k = (plt_offset + 65535) & -65536;
+ /* Determine if a long branch stub is needed. */
+ max_branch_offset = 1 << 25;
+ if (r_type != (unsigned int) R_PPC64_REL24)
+ max_branch_offset = 1 << 15;
- /* If the .plt doesn't have any entries crossing a 64k boundary,
- then there is no need for bigger stubs. */
- if (plt_offset + htab->splt->_raw_size <= next_64k)
- return true;
+ if (branch_offset + max_branch_offset >= 2 * max_branch_offset)
+ /* We need a stub. Figure out whether a long_branch or plt_branch
+ is needed later. */
+ return ppc_stub_long_branch;
- /* OK, so we have at least one transition. Since .plt entries are
- 24 bytes long, we'll strike it lucky every 3*64k, with the 64k
- boundary between .plt entries. */
- base = next_64k / 65536;
- num = (plt_offset + htab->splt->_raw_size - next_64k) / 65536;
- extra = (base % 3 + num + 1) * 2 / 3;
-
- /* Allow one extra instruction for each EXTRA. The change in .stub
- may change the location of .toc and .plt. .toc and .plt ought to
- move as a group, but someone might be playing with eg. .plt
- alignment, so don't allow .stub size to decrease. */
- if (htab->sstub->_cooked_size < htab->sstub->_raw_size + extra * 4)
- {
- htab->sstub->_cooked_size = htab->sstub->_raw_size + extra * 4;
- *changed = true;
- }
- return true;
+ return ppc_stub_none;
}
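The reach test above, branch_offset + max_branch_offset >= 2 * max_branch_offset, is the usual unsigned idiom for "the signed displacement lies outside [-max, max)". A standalone sketch of the same check, not part of the patch:

#include <stdint.h>

/* OFF wraps modulo 2^64, so off + max >= 2 * max is true exactly when
   the displacement, viewed as a signed quantity, is < -max or >= max.
   MAX is 1 << 25 for R_PPC64_REL24 (the 24-bit branch field covers
   +/- 2^25 bytes) and 1 << 15 for the 14-bit conditional branches.  */
static int
branch_out_of_range (uint64_t destination, uint64_t location, uint64_t max)
{
  uint64_t off = destination - location;
  return off + max >= 2 * max;
}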
/* Build a .plt call stub. */
@@ -3680,158 +4362,848 @@ build_plt_stub (obfd, p, offset, glink)
return p;
}
-/* Build the stubs for one function call. */
-
static boolean
-build_one_stub (h, inf)
- struct elf_link_hash_entry *h;
- PTR inf;
+ppc_build_one_stub (gen_entry, in_arg)
+ struct bfd_hash_entry *gen_entry;
+ PTR in_arg;
{
+ struct ppc_stub_hash_entry *stub_entry;
+ struct ppc_branch_hash_entry *br_entry;
struct bfd_link_info *info;
struct ppc_link_hash_table *htab;
+ asection *stub_sec;
+ bfd *stub_bfd;
+ bfd_byte *loc;
+ bfd_byte *p;
+ unsigned int indx;
+ bfd_vma off;
+ int size;
- if (h->root.type == bfd_link_hash_indirect
- || h->root.type == bfd_link_hash_warning)
- return true;
+ /* Massage our args to the form they really have. */
+ stub_entry = (struct ppc_stub_hash_entry *) gen_entry;
+ info = (struct bfd_link_info *) in_arg;
- info = (struct bfd_link_info *) inf;
htab = ppc_hash_table (info);
+ stub_sec = stub_entry->stub_sec;
- if (htab->elf.dynamic_sections_created
- && h->plt.offset != (bfd_vma) -1
- && ((struct ppc_link_hash_entry *) h)->is_func_descriptor)
+ /* Make a note of the offset within the stubs for this entry. */
+ stub_entry->stub_offset = stub_sec->_cooked_size;
+ loc = stub_sec->contents + stub_entry->stub_offset;
+
+ stub_bfd = stub_sec->owner;
+
+ switch (stub_entry->stub_type)
{
- struct elf_link_hash_entry *fh;
- asection *s;
- bfd_vma plt_r2;
- bfd_byte *p;
- unsigned int indx;
+ case ppc_stub_long_branch:
+ /* Branches are relative. This is where we are going to. */
+ off = (stub_entry->target_value
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_section->output_section->vma);
- fh = elf_link_hash_lookup (&htab->elf, h->root.root.string - 1,
- false, false, true);
+ /* And this is where we are coming from. */
+ off -= (stub_entry->stub_offset
+ + stub_sec->output_offset
+ + stub_sec->output_section->vma);
- if (fh == NULL)
- abort ();
+ BFD_ASSERT (off + (1 << 25) < (bfd_vma) (1 << 26));
- BFD_ASSERT (((struct ppc_link_hash_entry *) fh)->is_func);
+ bfd_put_32 (stub_bfd, (bfd_vma) B_DOT | (off & 0x3fffffc), loc);
+ size = 4;
+ break;
- /* Build the .plt call stub. */
- plt_r2 = (htab->splt->output_section->vma
- + htab->splt->output_offset
- + h->plt.offset
- - elf_gp (htab->splt->output_section->owner)
- - TOC_BASE_OFF);
+ case ppc_stub_plt_branch:
+ br_entry = ppc_branch_hash_lookup (&htab->branch_hash_table,
+ stub_entry->root.string + 9,
+ false, false);
+ if (br_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("can't find branch stub `%s'"),
+ stub_entry->root.string + 9);
+ htab->stub_error = true;
+ return false;
+ }
+
+ off = (stub_entry->target_value
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_section->output_section->vma);
+
+ bfd_put_64 (htab->sbrlt->owner, off,
+ htab->sbrlt->contents + br_entry->offset);
+
+ if (info->shared)
+ {
+ /* Create a reloc for the branch lookup table entry. */
+ Elf_Internal_Rela rela;
+ Elf64_External_Rela *r;
+
+ rela.r_offset = (br_entry->offset
+ + htab->sbrlt->output_offset
+ + htab->sbrlt->output_section->vma);
+ rela.r_info = ELF64_R_INFO (0, R_PPC64_RELATIVE);
+ rela.r_addend = off;
+
+ r = (Elf64_External_Rela *) htab->srelbrlt->contents;
+ r += htab->srelbrlt->reloc_count++;
+ bfd_elf64_swap_reloca_out (htab->srelbrlt->owner, &rela, r);
+ }
+
+ off = (br_entry->offset
+ + htab->sbrlt->output_offset
+ + htab->sbrlt->output_section->vma
+ - elf_gp (htab->sbrlt->output_section->owner)
+ - TOC_BASE_OFF);
- if (plt_r2 + 0x80000000 > 0xffffffff
- || (plt_r2 & 3) != 0)
+ if (off + 0x80000000 > 0xffffffff || (off & 7) != 0)
{
(*_bfd_error_handler)
(_("linkage table error against `%s'"),
- h->root.root.string);
+ stub_entry->root.string);
bfd_set_error (bfd_error_bad_value);
- htab->plt_overflow = true;
+ htab->stub_error = true;
return false;
}
- s = htab->sstub;
- /* Steal plt.offset to store the stub offset. */
- fh->plt.offset = s->_cooked_size;
- p = s->contents + s->_cooked_size;
- p = build_plt_stub (s->owner, p, (int) plt_r2, 0);
- s->_cooked_size = p - s->contents;
-
- /* Build the .glink lazy link call stub. */
- s = htab->sglink;
- p = s->contents + s->_cooked_size;
- indx = s->reloc_count;
- if (indx < 0x8000)
+ indx = off;
+ bfd_put_32 (stub_bfd, (bfd_vma) ADDIS_R12_R2 | PPC_HA (indx), loc);
+ bfd_put_32 (stub_bfd, (bfd_vma) LD_R11_0R12 | PPC_LO (indx), loc + 4);
+ bfd_put_32 (stub_bfd, (bfd_vma) MTCTR_R11, loc + 8);
+ bfd_put_32 (stub_bfd, (bfd_vma) BCTR, loc + 12);
+ size = 16;
+ break;
+
+ case ppc_stub_plt_call:
+ /* Do the best we can for shared libraries built without
+ exporting ".foo" for each "foo". This can happen when symbol
+ versioning scripts strip all bar a subset of symbols. */
+ if (stub_entry->h->oh->root.type != bfd_link_hash_defined
+ && stub_entry->h->oh->root.type != bfd_link_hash_defweak)
{
- bfd_put_32 (s->owner, LI_R0_0 | indx, p);
- p += 4;
+ /* Point the symbol at the stub. There may be multiple stubs;
+ we don't really care. The main thing is to make this sym
+ defined somewhere. */
+ stub_entry->h->oh->root.type = bfd_link_hash_defined;
+ stub_entry->h->oh->root.u.def.section = stub_entry->stub_sec;
+ stub_entry->h->oh->root.u.def.value = stub_entry->stub_offset;
}
- else
+
+ /* Now build the stub. */
+ off = stub_entry->h->elf.plt.offset;
+ if (off >= (bfd_vma) -2)
+ abort ();
+
+ off &= ~ (bfd_vma) 1;
+ off += (htab->splt->output_offset
+ + htab->splt->output_section->vma
+ - elf_gp (htab->splt->output_section->owner)
+ - TOC_BASE_OFF);
+
+ if (off + 0x80000000 > 0xffffffff || (off & 7) != 0)
{
- bfd_put_32 (s->owner, LIS_R0_0 | PPC_HI (indx), p);
- p += 4;
- bfd_put_32 (s->owner, ORI_R0_R0_0 | PPC_LO (indx), p);
- p += 4;
+ (*_bfd_error_handler)
+ (_("linkage table error against `%s'"),
+ stub_entry->h->elf.root.root.string);
+ bfd_set_error (bfd_error_bad_value);
+ htab->stub_error = true;
+ return false;
}
- bfd_put_32 (s->owner, B_DOT | ((s->contents - p) & 0x3fffffc), p);
- p += 4;
- s->_cooked_size = p - s->contents;
- s->reloc_count += 1;
+
+ p = build_plt_stub (stub_bfd, loc, (int) off, 0);
+ size = p - loc;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return false;
}
+
+ stub_sec->_cooked_size += size;
return true;
}
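Both the plt_branch and plt_call cases above reject an r2-relative offset with off + 0x80000000 > 0xffffffff || (off & 7) != 0. A standalone sketch of what that test demands, under the usual interpretation of the @ha/@l split:

#include <stdint.h>

/* A TOC-relative stub offset is usable only if it fits in a signed
   32-bit value, so that an addis/ld pair can rebuild it from its @ha
   and @l halves, and if it is a multiple of 8, matching the alignment
   of the 8-byte words the stubs load.  */
static int
toc_offset_ok (uint64_t off)
{
  return off + 0x80000000ULL <= 0xffffffffULL && (off & 7) == 0;
}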
-boolean
-ppc64_elf_build_stubs (obfd, info)
- bfd *obfd;
+/* As above, but don't actually build the stub. Just bump offset so
+ we know stub section sizes, and select plt_branch stubs where
+ long_branch stubs won't do. */
+
+static boolean
+ppc_size_one_stub (gen_entry, in_arg)
+ struct bfd_hash_entry *gen_entry;
+ PTR in_arg;
+{
+ struct ppc_stub_hash_entry *stub_entry;
+ struct ppc_link_hash_table *htab;
+ bfd_vma off;
+ int size;
+
+ /* Massage our args to the form they really have. */
+ stub_entry = (struct ppc_stub_hash_entry *) gen_entry;
+ htab = (struct ppc_link_hash_table *) in_arg;
+
+ if (stub_entry->stub_type == ppc_stub_plt_call)
+ {
+ off = stub_entry->h->elf.plt.offset & ~(bfd_vma) 1;
+ off += (htab->splt->output_offset
+ + htab->splt->output_section->vma
+ - elf_gp (htab->splt->output_section->owner)
+ - TOC_BASE_OFF);
+
+ size = 28;
+ if (PPC_HA ((int) off + 16) != PPC_HA ((int) off))
+ size += 4;
+ }
+ else
+ {
+ /* ppc_stub_long_branch or ppc_stub_plt_branch. */
+ stub_entry->stub_type = ppc_stub_long_branch;
+ size = 4;
+
+ off = (stub_entry->target_value
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_section->output_section->vma);
+ off -= (stub_entry->stub_sec->_raw_size
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_sec->output_section->vma);
+
+ if (off + (1 << 25) >= (bfd_vma) (1 << 26))
+ {
+ struct ppc_branch_hash_entry *br_entry;
+
+ br_entry = ppc_branch_hash_lookup (&htab->branch_hash_table,
+ stub_entry->root.string + 9,
+ true, false);
+ if (br_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("can't build branch stub `%s'"),
+ stub_entry->root.string + 9);
+ htab->stub_error = true;
+ return false;
+ }
+
+ if (br_entry->iter != htab->stub_iteration)
+ {
+ br_entry->iter = htab->stub_iteration;
+ br_entry->offset = htab->sbrlt->_raw_size;
+ htab->sbrlt->_raw_size += 8;
+ }
+ stub_entry->stub_type = ppc_stub_plt_branch;
+ size = 16;
+ }
+ }
+
+ stub_entry->stub_sec->_raw_size += size;
+ return true;
+}
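The plt_call sizing above adds 4 bytes when PPC_HA (off + 16) differs from PPC_HA (off), i.e. when the loads of the 24-byte PLT entry (at off, off + 8 and off + 16) straddle an @ha boundary and one addis cannot cover them all. A small check of that boundary condition, using hypothetical helpers that follow the usual ppc @ha/@l convention (the real PPC_HA/PPC_LO macros are defined elsewhere in this file):

#include <assert.h>
#include <stdint.h>

/* lo is the low 16 bits taken as signed; ha compensates for that sign
   extension, so (ha << 16) + lo reconstructs the value.  */
static int32_t ppc_lo_sketch (int64_t v) { return (int16_t) v; }
static int32_t ppc_ha_sketch (int64_t v) { return (int16_t) ((v + 0x8000) >> 16); }

int
main (void)
{
  int64_t off = 0x7ff8;        /* Hypothetical r2-relative .plt offset.  */

  assert ((ppc_ha_sketch (off) << 16) + ppc_lo_sketch (off) == off);
  assert (ppc_ha_sketch (off) != ppc_ha_sketch (off + 16));  /* Needs the extra insn.  */
  return 0;
}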
+
+/* Set up various things so that we can make a list of input sections
+ for each output section included in the link. Returns -1 on error,
+ 0 when no stubs will be needed, and 1 on success. */
+
+int
+ppc64_elf_setup_section_lists (output_bfd, info)
+ bfd *output_bfd;
struct bfd_link_info *info;
{
+ bfd *input_bfd;
+ int top_id, top_index;
+ asection *section;
+ asection **input_list, **list;
+ bfd_size_type amt;
struct ppc_link_hash_table *htab = ppc_hash_table (info);
- bfd_vma old_stub_size;
- bfd_vma plt_r2;
- bfd_byte *p;
- /* If no .plt stubs, then nothing to do. */
- if (htab->sstub == NULL || htab->sstub->_raw_size == 0)
- return true;
+ if (htab->elf.root.creator->flavour != bfd_target_elf_flavour
+ || htab->sbrlt == NULL)
+ return 0;
- old_stub_size = htab->sstub->_cooked_size;
- htab->sstub->_cooked_size = 0;
-
- /* Build the .glink plt call stub. */
- plt_r2 = (htab->splt->output_section->vma
- + htab->splt->output_offset
- - elf_gp (obfd)
- - TOC_BASE_OFF);
- p = htab->sglink->contents;
- p = build_plt_stub (htab->sglink->owner, p, (int) plt_r2, 1);
- while (p - htab->sglink->contents < GLINK_CALL_STUB_SIZE)
+ /* Find the top input section id. */
+ for (input_bfd = info->input_bfds, top_id = 0;
+ input_bfd != NULL;
+ input_bfd = input_bfd->link_next)
{
- bfd_put_32 (htab->sglink->owner, NOP, p);
- p += 4;
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ if (top_id < section->id)
+ top_id = section->id;
+ }
}
- htab->sglink->_cooked_size = p - htab->sglink->contents;
- /* Use reloc_count to count entries. */
- htab->sglink->reloc_count = 0;
+ amt = sizeof (struct map_stub) * (top_id + 1);
+ htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
+ if (htab->stub_group == NULL)
+ return -1;
+
+ /* We can't use output_bfd->section_count here to find the top output
+ section index as some sections may have been removed, and
+ _bfd_strip_section_from_output doesn't renumber the indices. */
+ for (section = output_bfd->sections, top_index = 0;
+ section != NULL;
+ section = section->next)
+ {
+ if (top_index < section->index)
+ top_index = section->index;
+ }
- elf_link_hash_traverse (&htab->elf, build_one_stub, (PTR) info);
- htab->sglink->reloc_count = 0;
+ htab->top_index = top_index;
+ amt = sizeof (asection *) * (top_index + 1);
+ input_list = (asection **) bfd_malloc (amt);
+ htab->input_list = input_list;
+ if (input_list == NULL)
+ return -1;
+
+ /* For sections we aren't interested in, mark their entries with a
+ value we can check later. */
+ list = input_list + top_index;
+ do
+ *list = bfd_abs_section_ptr;
+ while (list-- != input_list);
+
+ for (section = output_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ if ((section->flags & SEC_CODE) != 0)
+ input_list[section->index] = NULL;
+ }
- if (htab->plt_overflow)
- return false;
+ return 1;
+}
- if (old_stub_size != htab->sstub->_cooked_size
- || htab->sglink->_raw_size != htab->sglink->_cooked_size)
+/* The linker repeatedly calls this function for each input section,
+ in the order that input sections are linked into output sections.
+ Build lists of input sections to determine groupings between which
+ we may insert linker stubs. */
+
+void
+ppc64_elf_next_input_section (info, isec)
+ struct bfd_link_info *info;
+ asection *isec;
+{
+ struct ppc_link_hash_table *htab = ppc_hash_table (info);
+
+ if (isec->output_section->index <= htab->top_index)
{
- (*_bfd_error_handler)
- (_("stub section size doesn't match calculated size"));
- bfd_set_error (bfd_error_bad_value);
- return false;
+ asection **list = htab->input_list + isec->output_section->index;
+ if (*list != bfd_abs_section_ptr)
+ {
+ /* Steal the link_sec pointer for our list. */
+#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
+ /* This happens to make the list in reverse order,
+ which is what we want. */
+ PREV_SEC (isec) = *list;
+ *list = isec;
+ }
}
- return true;
}
-/* Set up any other section flags and such that may be necessary. */
+/* See whether we can group stub sections together. Grouping stub
+ sections may result in fewer stubs. More importantly, we need to
+ put all .init* and .fini* stubs at the beginning of the .init or
+ .fini output sections respectively, because glibc splits the
+ _init and _fini functions into multiple parts. Putting a stub in
+ the middle of a function is not a good idea. */
-static boolean
-ppc64_elf_fake_sections (abfd, shdr, asect)
- bfd *abfd ATTRIBUTE_UNUSED;
- Elf64_Internal_Shdr *shdr;
- asection *asect;
+static void
+group_sections (htab, stub_group_size, stubs_always_before_branch)
+ struct ppc_link_hash_table *htab;
+ bfd_size_type stub_group_size;
+ boolean stubs_always_before_branch;
+{
+ asection **list = htab->input_list + htab->top_index;
+ do
+ {
+ asection *tail = *list;
+ if (tail == bfd_abs_section_ptr)
+ continue;
+ while (tail != NULL)
+ {
+ asection *curr;
+ asection *prev;
+ bfd_size_type total;
+
+ curr = tail;
+ if (tail->_cooked_size)
+ total = tail->_cooked_size;
+ else
+ total = tail->_raw_size;
+ while ((prev = PREV_SEC (curr)) != NULL
+ && ((total += curr->output_offset - prev->output_offset)
+ < stub_group_size))
+ curr = prev;
+
+ /* OK, the size from the start of CURR to the end is less
+ than stub_group_size and thus can be handled by one stub
+ section. (Or the tail section is itself larger than
+ stub_group_size, in which case we may be toast.) We
+ should really be keeping track of the total size of stubs
+ added here, as stubs contribute to the final output
+ section size. That's a little tricky, and this way will
+ only break if stubs added make the total size more than
+ 2^25, i.e. for the default stub_group_size, if stubs total
+ more than 2834432 bytes, or over 100000 plt call stubs. */
+ do
+ {
+ prev = PREV_SEC (tail);
+ /* Set up this stub group. */
+ htab->stub_group[tail->id].link_sec = curr;
+ }
+ while (tail != curr && (tail = prev) != NULL);
+
+ /* But wait, there's more! Input sections up to stub_group_size
+ bytes before the stub section can be handled by it too. */
+ if (!stubs_always_before_branch)
+ {
+ total = 0;
+ while (prev != NULL
+ && ((total += tail->output_offset - prev->output_offset)
+ < stub_group_size))
+ {
+ tail = prev;
+ prev = PREV_SEC (tail);
+ htab->stub_group[tail->id].link_sec = curr;
+ }
+ }
+ tail = prev;
+ }
+ }
+ while (list-- != htab->input_list);
+ free (htab->input_list);
+#undef PREV_SEC
+}
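group_sections walks the per-output-section lists built by ppc64_elf_next_input_section (newest section first, threaded through the borrowed link_sec pointer) and points every member of a group at one representative section, which is where that group's stubs will be placed. A simplified sketch of the core walk, on a made-up section type rather than asection and without the stubs_always_before_branch extension:

#include <stddef.h>

struct msec
{
  struct msec *prev;        /* PREV_SEC: the next older input section.  */
  size_t output_offset;     /* Offset within the output section.        */
  size_t size;
  struct msec *link_sec;    /* Group representative, as in stub_group.  */
};

static void
group_list (struct msec *tail, size_t group_size)
{
  while (tail != NULL)
    {
      struct msec *curr = tail, *prev;
      size_t total = tail->size;

      /* Extend the group backwards while it stays under group_size.  */
      while ((prev = curr->prev) != NULL
             && (total += curr->output_offset - prev->output_offset) < group_size)
        curr = prev;

      /* Every member of the group shares the same representative.  */
      do
        {
          prev = tail->prev;
          tail->link_sec = curr;
        }
      while (tail != curr && (tail = prev) != NULL);

      tail = prev;
    }
}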
+
+/* Determine and set the size of the stub section for a final link.
+
+ The basic idea here is to examine all the relocations looking for
+ PC-relative calls to a target that is unreachable with a "bl"
+ instruction. */
+
+boolean
+ppc64_elf_size_stubs (output_bfd, stub_bfd, info, group_size,
+ add_stub_section, layout_sections_again)
+ bfd *output_bfd;
+ bfd *stub_bfd;
+ struct bfd_link_info *info;
+ bfd_signed_vma group_size;
+ asection * (*add_stub_section) PARAMS ((const char *, asection *));
+ void (*layout_sections_again) PARAMS ((void));
{
- if ((asect->flags & SEC_EXCLUDE) != 0)
- shdr->sh_flags |= SHF_EXCLUDE;
+ bfd_size_type stub_group_size;
+ boolean stubs_always_before_branch;
+ struct ppc_link_hash_table *htab = ppc_hash_table (info);
+
+ /* Stash our params away. */
+ htab->stub_bfd = stub_bfd;
+ htab->add_stub_section = add_stub_section;
+ htab->layout_sections_again = layout_sections_again;
+ stubs_always_before_branch = group_size < 0;
+ if (group_size < 0)
+ stub_group_size = -group_size;
+ else
+ stub_group_size = group_size;
+ if (stub_group_size == 1)
+ {
+ /* Default values. */
+ stub_group_size = 30720000;
+ if (htab->has_14bit_branch)
+ stub_group_size = 30000;
+ }
+
+ group_sections (htab, stub_group_size, stubs_always_before_branch);
+
+ while (1)
+ {
+ bfd *input_bfd;
+ unsigned int bfd_indx;
+ asection *stub_sec;
+ boolean stub_changed;
+
+ htab->stub_iteration += 1;
+ stub_changed = false;
+
+ for (input_bfd = info->input_bfds, bfd_indx = 0;
+ input_bfd != NULL;
+ input_bfd = input_bfd->link_next, bfd_indx++)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+ asection *section;
+ Elf_Internal_Sym *local_syms = NULL;
+
+ /* We'll need the symbol table in a second. */
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ if (symtab_hdr->sh_info == 0)
+ continue;
+
+ /* Walk over each section attached to the input bfd. */
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
+
+ /* If there aren't any relocs, then there's nothing more
+ to do. */
+ if ((section->flags & SEC_RELOC) == 0
+ || section->reloc_count == 0)
+ continue;
+
+ /* If this section is a link-once section that will be
+ discarded, then don't create any stubs. */
+ if (section->output_section == NULL
+ || section->output_section->owner != output_bfd)
+ continue;
+
+ /* Get the relocs. */
+ internal_relocs
+ = _bfd_elf64_link_read_relocs (input_bfd, section, NULL,
+ (Elf_Internal_Rela *) NULL,
+ info->keep_memory);
+ if (internal_relocs == NULL)
+ goto error_ret_free_local;
+
+ /* Now examine each relocation. */
+ irela = internal_relocs;
+ irelaend = irela + section->reloc_count;
+ for (; irela < irelaend; irela++)
+ {
+ unsigned int r_type, r_indx;
+ enum ppc_stub_type stub_type;
+ struct ppc_stub_hash_entry *stub_entry;
+ asection *sym_sec;
+ bfd_vma sym_value;
+ bfd_vma destination;
+ struct ppc_link_hash_entry *hash;
+ char *stub_name;
+ const asection *id_sec;
+
+ r_type = ELF64_R_TYPE (irela->r_info);
+ r_indx = ELF64_R_SYM (irela->r_info);
+
+ if (r_type >= (unsigned int) R_PPC_max)
+ {
+ bfd_set_error (bfd_error_bad_value);
+ goto error_ret_free_internal;
+ }
+
+ /* Only look for stubs on branch instructions. */
+ if (r_type != (unsigned int) R_PPC64_REL24
+ && r_type != (unsigned int) R_PPC64_REL14
+ && r_type != (unsigned int) R_PPC64_REL14_BRTAKEN
+ && r_type != (unsigned int) R_PPC64_REL14_BRNTAKEN)
+ continue;
+
+ /* Now determine the call target, its name, value,
+ section. */
+ sym_sec = NULL;
+ sym_value = 0;
+ destination = 0;
+ hash = NULL;
+ if (r_indx < symtab_hdr->sh_info)
+ {
+ /* It's a local symbol. */
+ Elf_Internal_Sym *sym;
+ Elf_Internal_Shdr *hdr;
+
+ if (local_syms == NULL)
+ {
+ local_syms
+ = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (local_syms == NULL)
+ local_syms
+ = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+ symtab_hdr->sh_info, 0,
+ NULL, NULL, NULL);
+ if (local_syms == NULL)
+ goto error_ret_free_internal;
+ }
+ sym = local_syms + r_indx;
+ hdr = elf_elfsections (input_bfd)[sym->st_shndx];
+ sym_sec = hdr->bfd_section;
+ if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
+ sym_value = sym->st_value;
+ destination = (sym_value + irela->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else
+ {
+ /* It's an external symbol. */
+ int e_indx;
+
+ e_indx = r_indx - symtab_hdr->sh_info;
+ hash = ((struct ppc_link_hash_entry *)
+ elf_sym_hashes (input_bfd)[e_indx]);
+
+ while (hash->elf.root.type == bfd_link_hash_indirect
+ || hash->elf.root.type == bfd_link_hash_warning)
+ hash = ((struct ppc_link_hash_entry *)
+ hash->elf.root.u.i.link);
+
+ if (hash->elf.root.type == bfd_link_hash_defined
+ || hash->elf.root.type == bfd_link_hash_defweak)
+ {
+ sym_sec = hash->elf.root.u.def.section;
+ sym_value = hash->elf.root.u.def.value;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value + irela->r_addend
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if (hash->elf.root.type == bfd_link_hash_undefweak)
+ ;
+ else if (hash->elf.root.type == bfd_link_hash_undefined)
+ ;
+ else
+ {
+ bfd_set_error (bfd_error_bad_value);
+ goto error_ret_free_internal;
+ }
+ }
+
+ /* Determine what (if any) linker stub is needed. */
+ stub_type = ppc_type_of_stub (section, irela, &hash,
+ destination);
+ if (stub_type == ppc_stub_none)
+ continue;
+
+ /* Support for grouping stub sections. */
+ id_sec = htab->stub_group[section->id].link_sec;
+
+ /* Get the name of this stub. */
+ stub_name = ppc_stub_name (id_sec, sym_sec, hash, irela);
+ if (!stub_name)
+ goto error_ret_free_internal;
+
+ stub_entry = ppc_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name, false, false);
+ if (stub_entry != NULL)
+ {
+ /* The proper stub has already been created. */
+ free (stub_name);
+ continue;
+ }
+
+ stub_entry = ppc_add_stub (stub_name, section, htab);
+ if (stub_entry == NULL)
+ {
+ free (stub_name);
+ error_ret_free_internal:
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ error_ret_free_local:
+ if (local_syms != NULL
+ && (symtab_hdr->contents
+ != (unsigned char *) local_syms))
+ free (local_syms);
+ return false;
+ }
+
+ stub_entry->target_value = sym_value;
+ stub_entry->target_section = sym_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = hash;
+ stub_changed = true;
+ }
+
+ /* We're done with the internal relocs; free them. */
+ if (elf_section_data (section)->relocs != internal_relocs)
+ free (internal_relocs);
+ }
+
+ if (local_syms != NULL
+ && symtab_hdr->contents != (unsigned char *) local_syms)
+ {
+ if (!info->keep_memory)
+ free (local_syms);
+ else
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
+ }
+
+ if (!stub_changed)
+ break;
+
+ /* OK, we've added some stubs. Find out the new size of the
+ stub sections. */
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ stub_sec->_raw_size = 0;
+ stub_sec->_cooked_size = 0;
+ }
+ htab->sbrlt->_raw_size = 0;
+ htab->sbrlt->_cooked_size = 0;
+
+ bfd_hash_traverse (&htab->stub_hash_table, ppc_size_one_stub, htab);
+
+ /* Ask the linker to do its stuff. */
+ (*htab->layout_sections_again) ();
+ }
- if ((asect->flags & SEC_SORT_ENTRIES) != 0)
- shdr->sh_type = SHT_ORDERED;
+ /* It would be nice to strip .branch_lt from the output if the
+ section is empty, but it's too late. If we strip sections here,
+ the dynamic symbol table is corrupted since the section symbol
+ for the stripped section isn't written. */
return true;
}
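The default group sizes chosen above (30720000 bytes, or 30000 when 14-bit branches are present) sit deliberately below the raw branch reach, leaving headroom for the stubs appended to each group; the 2834432-byte figure quoted in the group_sections comment is exactly that headroom. A quick arithmetic check, standalone:

#include <stdio.h>

int
main (void)
{
  /* bl reaches +/- 2^25 bytes; conditional branches reach +/- 2^15.  */
  printf ("bl: reach %d, group %d, headroom %d\n",
          1 << 25, 30720000, (1 << 25) - 30720000);   /* 2834432 */
  printf ("bc: reach %d, group %d, headroom %d\n",
          1 << 15, 30000, (1 << 15) - 30000);         /* 2768 */
  return 0;
}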
+/* Called after we have determined section placement. If sections
+ move, we'll be called again. Provide a value for TOCstart. */
+
+bfd_vma
+ppc64_elf_toc (obfd)
+ bfd *obfd;
+{
+ asection *s;
+ bfd_vma TOCstart;
+
+ /* The TOC consists of sections .got, .toc, .tocbss, .plt in that
+ order. The TOC starts where the first of these sections starts. */
+ s = bfd_get_section_by_name (obfd, ".got");
+ if (s == NULL)
+ s = bfd_get_section_by_name (obfd, ".toc");
+ if (s == NULL)
+ s = bfd_get_section_by_name (obfd, ".tocbss");
+ if (s == NULL)
+ s = bfd_get_section_by_name (obfd, ".plt");
+ if (s == NULL)
+ {
+ /* This may happen for
+ o references to TOC base (SYM@toc / TOC[tc0]) without a
+ .toc directive
+ o bad linker script
+ o --gc-sections and empty TOC sections
+
+ FIXME: Warn user? */
+
+ /* Look for a likely section. We probably won't even be
+ using TOCstart. */
+ for (s = obfd->sections; s != NULL; s = s->next)
+ if ((s->flags & (SEC_ALLOC | SEC_SMALL_DATA | SEC_READONLY))
+ == (SEC_ALLOC | SEC_SMALL_DATA))
+ break;
+ if (s == NULL)
+ for (s = obfd->sections; s != NULL; s = s->next)
+ if ((s->flags & (SEC_ALLOC | SEC_SMALL_DATA))
+ == (SEC_ALLOC | SEC_SMALL_DATA))
+ break;
+ if (s == NULL)
+ for (s = obfd->sections; s != NULL; s = s->next)
+ if ((s->flags & (SEC_ALLOC | SEC_READONLY)) == SEC_ALLOC)
+ break;
+ if (s == NULL)
+ for (s = obfd->sections; s != NULL; s = s->next)
+ if ((s->flags & SEC_ALLOC) == SEC_ALLOC)
+ break;
+ }
+
+ TOCstart = 0;
+ if (s != NULL)
+ TOCstart = s->output_section->vma + s->output_offset;
+
+ return TOCstart;
+}
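ppc64_elf_toc returns the start of the TOC-forming sections; the TOC pointer (r2) that generated code and the stubs use sits TOC_BASE_OFF bytes past that start (0x8000, putting r2 in the middle of the directly addressable 64 KiB window), which is why the stub code above subtracts both elf_gp and TOC_BASE_OFF. A small worked example with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t toc_start = 0x10010000;            /* Hypothetical start of .got/.toc.  */
  uint64_t toc_base  = toc_start + 0x8000;    /* TOC_BASE_OFF past the start.      */
  uint64_t datum     = 0x10010100;            /* Some TOC entry.                   */

  /* The entry is addressed as a signed 16-bit offset from r2.  */
  printf ("r2 = %#llx, offset = %lld\n",
          (unsigned long long) toc_base,
          (long long) (datum - toc_base));    /* -32512, i.e. -0x7f00.  */
  return 0;
}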
+
+/* Build all the stubs associated with the current output file.
+ The stubs are kept in a hash table attached to the main linker
+ hash table. This function is called via gldelf64ppc_finish. */
+
+boolean
+ppc64_elf_build_stubs (info)
+ struct bfd_link_info *info;
+{
+ struct ppc_link_hash_table *htab = ppc_hash_table (info);
+ asection *stub_sec;
+ bfd_vma plt_r2;
+ bfd_byte *p;
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ bfd_size_type size;
+
+ /* Allocate memory to hold the linker stubs. */
+ size = stub_sec->_raw_size;
+ if (size != 0)
+ {
+ stub_sec->contents = (bfd_byte *) bfd_zalloc (htab->stub_bfd, size);
+ if (stub_sec->contents == NULL)
+ return false;
+ }
+ stub_sec->_cooked_size = 0;
+ }
+
+ if (htab->splt != NULL)
+ {
+ unsigned int indx;
+
+ /* Build the .glink plt call stub. */
+ plt_r2 = (htab->splt->output_offset
+ + htab->splt->output_section->vma
+ - elf_gp (htab->splt->output_section->owner)
+ - TOC_BASE_OFF);
+ p = htab->sglink->contents;
+ p = build_plt_stub (htab->sglink->owner, p, (int) plt_r2, 1);
+ while (p < htab->sglink->contents + GLINK_CALL_STUB_SIZE)
+ {
+ bfd_put_32 (htab->sglink->owner, NOP, p);
+ p += 4;
+ }
+
+ /* Build the .glink lazy link call stubs. */
+ indx = 0;
+ while (p < htab->sglink->contents + htab->sglink->_raw_size)
+ {
+ if (indx < 0x8000)
+ {
+ bfd_put_32 (htab->sglink->owner, LI_R0_0 | indx, p);
+ p += 4;
+ }
+ else
+ {
+ bfd_put_32 (htab->sglink->owner, LIS_R0_0 | PPC_HI (indx), p);
+ p += 4;
+ bfd_put_32 (htab->sglink->owner, ORI_R0_R0_0 | PPC_LO (indx), p);
+ p += 4;
+ }
+ bfd_put_32 (htab->sglink->owner,
+ B_DOT | ((htab->sglink->contents - p) & 0x3fffffc), p);
+ indx++;
+ p += 4;
+ }
+ htab->sglink->_cooked_size = p - htab->sglink->contents;
+ }
+
+ if (htab->sbrlt->_raw_size != 0)
+ {
+ htab->sbrlt->contents = (bfd_byte *) bfd_zalloc (htab->sbrlt->owner,
+ htab->sbrlt->_raw_size);
+ if (htab->sbrlt->contents == NULL)
+ return false;
+ }
+
+ /* Build the stubs as directed by the stub hash table. */
+ bfd_hash_traverse (&htab->stub_hash_table, ppc_build_one_stub, info);
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ if (stub_sec->_raw_size != stub_sec->_cooked_size)
+ break;
+ }
+
+ if (stub_sec != NULL
+ || htab->sglink->_raw_size != htab->sglink->_cooked_size)
+ {
+ htab->stub_error = true;
+ (*_bfd_error_handler) (_("stubs don't match calculated size"));
+ }
+
+ return !htab->stub_error;
+}
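Each lazy-link entry emitted in the loop above loads its index into r0, with a single li for indices below 0x8000 and a lis/ori pair otherwise, then branches back to the shared call stub at the start of .glink. A sketch of the words produced, using the standard PowerPC encodings rather than this file's LI_R0_0, LIS_R0_0, ORI_R0_R0_0 and B_DOT macros:

#include <stdint.h>

/* Sketch only: emit one .glink lazy entry into INSN[] and return the
   number of 32-bit words written.  BACK is the byte displacement from
   the branch back to the start of .glink (negative).  */
static int
glink_lazy_entry (uint32_t *insn, unsigned int indx, int32_t back)
{
  int n = 0;

  if (indx < 0x8000)
    insn[n++] = 0x38000000 | indx;                      /* li  r0,indx       */
  else
    {
      insn[n++] = 0x3c000000 | ((indx >> 16) & 0xffff); /* lis r0,indx@h     */
      insn[n++] = 0x60000000 | (indx & 0xffff);         /* ori r0,r0,indx@l  */
    }
  insn[n++] = 0x48000000 | (back & 0x3fffffc);          /* b   .glink        */
  return n;
}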
+
/* The RELOCATE_SECTION function is called by the ELF backend linker
to handle the relocations for a section.
@@ -3897,7 +5269,7 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
TOCstart = elf_gp (output_bfd);
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (input_bfd);
- is_opd = strcmp (bfd_get_section_name (abfd, input_section), ".opd") == 0;
+ is_opd = elf_section_data (input_section)->tdata != NULL;
rel = relocs;
relend = relocs + input_section->reloc_count;
@@ -3910,11 +5282,15 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
Elf_Internal_Sym *sym;
asection *sec;
struct elf_link_hash_entry *h;
+ struct elf_link_hash_entry *fdh;
const char *sym_name;
unsigned long r_symndx;
bfd_vma relocation;
boolean unresolved_reloc;
+ boolean warned;
long insn;
+ struct ppc_stub_hash_entry *stub_entry;
+ bfd_vma max_br_offset;
bfd_vma from;
r_type = (enum elf_ppc_reloc_type) ELF64_R_TYPE (rel->r_info);
@@ -3927,6 +5303,7 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
h = (struct elf_link_hash_entry *) 0;
sym_name = (const char *) 0;
unresolved_reloc = false;
+ warned = false;
if (r_type == R_PPC64_TOC)
{
@@ -3943,6 +5320,15 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
relocation = _bfd_elf_rela_local_sym (output_bfd, sym, sec, rel);
/* rel may have changed, update our copy of addend. */
addend = rel->r_addend;
+
+ if (elf_section_data (sec) != NULL)
+ {
+ long *opd_sym_adjust;
+
+ opd_sym_adjust = (long *) elf_section_data (sec)->tdata;
+ if (opd_sym_adjust != NULL && sym->st_value % 24 == 0)
+ relocation += opd_sym_adjust[sym->st_value / 24];
+ }
}
else
{
@@ -3983,6 +5369,7 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
|| info->no_undefined
|| ELF_ST_VISIBILITY (h->other)))))
return false;
+ warned = true;
}
}
@@ -3996,8 +5383,8 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
/* Branch taken prediction relocations. */
case R_PPC64_ADDR14_BRTAKEN:
case R_PPC64_REL14_BRTAKEN:
- insn = 0x01 << 21; /* 'y' or 't' bit, lowest bit of BO field. */
- /* Fall thru. */
+ insn = 0x01 << 21; /* 'y' or 't' bit, lowest bit of BO field. */
+ /* Fall thru. */
/* Branch not taken prediction relocations. */
case R_PPC64_ADDR14_BRNTAKEN:
@@ -4036,8 +5423,10 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
to alter the TOC base. These are recognized by their
need for a PLT entry. */
if (h != NULL
- && h->plt.offset != (bfd_vma) -1
- && htab->sstub != NULL)
+ && (fdh = ((struct ppc_link_hash_entry *) h)->oh) != NULL
+ && fdh->plt.offset != (bfd_vma) -1
+ && (stub_entry = ppc_get_stub_entry (input_section, sec, fdh,
+ rel, htab)) != NULL)
{
boolean can_plt_call = 0;
@@ -4064,10 +5453,9 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
if (can_plt_call)
{
- /* plt.offset here is the offset into the stub section. */
- relocation = (htab->sstub->output_section->vma
- + htab->sstub->output_offset
- + h->plt.offset);
+ relocation = (stub_entry->stub_offset
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_sec->output_section->vma);
addend = 0;
unresolved_reloc = false;
}
@@ -4273,14 +5661,14 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
addend -= sec->output_section->vma;
break;
+ case R_PPC64_REL14:
+ case R_PPC64_REL14_BRNTAKEN:
+ case R_PPC64_REL14_BRTAKEN:
case R_PPC64_REL24:
break;
/* Relocations that may need to be propagated if this is a
dynamic object. */
- case R_PPC64_REL14:
- case R_PPC64_REL14_BRNTAKEN:
- case R_PPC64_REL14_BRTAKEN:
case R_PPC64_REL32:
case R_PPC64_REL64:
case R_PPC64_ADDR14:
@@ -4378,6 +5766,20 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
relocate = true;
if (r_type == R_PPC64_ADDR64 || r_type == R_PPC64_TOC)
{
+ if (is_opd && h != NULL)
+ {
+ /* Lie about opd entries. This case occurs
+ when building shared libraries and we
+ reference a function in another shared
+ lib. The same thing happens for a weak
+ definition in an application that's
+ overridden by a strong definition in a
+ shared lib. (I believe this is a generic
+ bug in binutils handling of weak syms.)
+ In these cases we won't use the opd
+ entry in this lib. */
+ unresolved_reloc = false;
+ }
outrel.r_info = ELF64_R_INFO (0, R_PPC64_RELATIVE);
}
else
@@ -4497,22 +5899,51 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
continue;
}
break;
+
+ case R_PPC64_REL14:
+ case R_PPC64_REL14_BRNTAKEN:
+ case R_PPC64_REL14_BRTAKEN:
+ max_br_offset = 1 << 15;
+ goto branch_check;
+
+ case R_PPC64_REL24:
+ max_br_offset = 1 << 25;
+
+ branch_check:
+ /* If the branch is out of reach, then redirect the
+ call to the local stub for this function. */
+ from = (offset
+ + input_section->output_offset
+ + input_section->output_section->vma);
+ if (relocation + addend - from + max_br_offset >= 2 * max_br_offset
+ && (stub_entry = ppc_get_stub_entry (input_section, sec, h,
+ rel, htab)) != NULL)
+ {
+ /* Munge up the value and addend so that we call the stub
+ rather than the procedure directly. */
+ relocation = (stub_entry->stub_offset
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_sec->output_section->vma);
+ addend = 0;
+ }
+ break;
}
- /* FIXME: Why do we allow debugging sections to escape this error?
- More importantly, why do we not emit dynamic relocs above in
- debugging sections (which are ! SEC_ALLOC)? If we had
- emitted the dynamic reloc, we could remove the fudge here. */
+ /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
+ because such sections are not SEC_ALLOC and thus ld.so will
+ not process them. */
if (unresolved_reloc
- && !(info->shared
- && (input_section->flags & SEC_DEBUGGING) != 0
+ && !((input_section->flags & SEC_DEBUGGING) != 0
&& (h->elf_link_hash_flags & ELF_LINK_HASH_DEF_DYNAMIC) != 0))
- (*_bfd_error_handler)
- (_("%s(%s+0x%lx): unresolvable relocation against symbol `%s'"),
- bfd_archive_filename (input_bfd),
- bfd_get_section_name (input_bfd, input_section),
- (long) rel->r_offset,
- h->root.root.string);
+ {
+ (*_bfd_error_handler)
+ (_("%s(%s+0x%lx): unresolvable relocation against symbol `%s'"),
+ bfd_archive_filename (input_bfd),
+ bfd_get_section_name (input_bfd, input_section),
+ (long) rel->r_offset,
+ h->root.root.string);
+ ret = false;
+ }
r = _bfd_final_link_relocate (ppc64_elf_howto_table[(int) r_type],
input_bfd,
@@ -4522,9 +5953,7 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
relocation,
addend);
- if (r == bfd_reloc_ok)
- ;
- else if (r == bfd_reloc_overflow)
+ if (r != bfd_reloc_ok)
{
const char *name;
@@ -4555,13 +5984,25 @@ ppc64_elf_relocate_section (output_bfd, info, input_bfd, input_section,
name = bfd_section_name (input_bfd, sec);
}
- if (! ((*info->callbacks->reloc_overflow)
- (info, name, ppc64_elf_howto_table[(int) r_type]->name,
- (bfd_vma) 0, input_bfd, input_section, offset)))
- return false;
+ if (r == bfd_reloc_overflow)
+ {
+ if (warned)
+ continue;
+ if (!((*info->callbacks->reloc_overflow)
+ (info, name, ppc64_elf_howto_table[(int) r_type]->name,
+ rel->r_addend, input_bfd, input_section, offset)))
+ return false;
+ }
+ else
+ {
+ (*_bfd_error_handler)
+ (_("%s(%s+0x%lx): reloc against `%s': error %d"),
+ bfd_archive_filename (input_bfd),
+ bfd_get_section_name (input_bfd, input_section),
+ (long) rel->r_offset, name, (int) r);
+ ret = false;
+ }
}
- else
- ret = false;
}
return ret;
@@ -4590,7 +6031,7 @@ ppc64_elf_finish_dynamic_symbol (output_bfd, info, h, sym)
Elf64_External_Rela *loc;
/* This symbol has an entry in the procedure linkage table. Set
- it up. */
+ it up. */
if (htab->splt == NULL
|| htab->srelplt == NULL
@@ -4617,7 +6058,7 @@ ppc64_elf_finish_dynamic_symbol (output_bfd, info, h, sym)
Elf64_External_Rela *loc;
/* This symbol has an entry in the global offset table. Set it
- up. */
+ up. */
if (htab->sgot == NULL || htab->srelgot == NULL)
abort ();
@@ -4840,11 +6281,11 @@ ppc64_elf_finish_dynamic_sections (output_bfd, info)
#define elf_backend_rela_normal 1
#define bfd_elf64_bfd_reloc_type_lookup ppc64_elf_reloc_type_lookup
-#define bfd_elf64_bfd_set_private_flags ppc64_elf_set_private_flags
#define bfd_elf64_bfd_merge_private_bfd_data ppc64_elf_merge_private_bfd_data
#define bfd_elf64_bfd_link_hash_table_create ppc64_elf_link_hash_table_create
+#define bfd_elf64_bfd_link_hash_table_free ppc64_elf_link_hash_table_free
-#define elf_backend_section_from_shdr ppc64_elf_section_from_shdr
+#define elf_backend_object_p ppc64_elf_object_p
#define elf_backend_create_dynamic_sections ppc64_elf_create_dynamic_sections
#define elf_backend_copy_indirect_symbol ppc64_elf_copy_indirect_symbol
#define elf_backend_check_relocs ppc64_elf_check_relocs
@@ -4854,7 +6295,6 @@ ppc64_elf_finish_dynamic_sections (output_bfd, info)
#define elf_backend_hide_symbol ppc64_elf_hide_symbol
#define elf_backend_always_size_sections ppc64_elf_func_desc_adjust
#define elf_backend_size_dynamic_sections ppc64_elf_size_dynamic_sections
-#define elf_backend_fake_sections ppc64_elf_fake_sections
#define elf_backend_relocate_section ppc64_elf_relocate_section
#define elf_backend_finish_dynamic_symbol ppc64_elf_finish_dynamic_symbol
#define elf_backend_reloc_type_class ppc64_elf_reloc_type_class