blob: b8e0e76c94200717506b1136d0fcc7e8041a61fc [file] [log] [blame]
/* IA-64 support for 64-bit ELF
Copyright (C) 1998-2016 Free Software Foundation, Inc.
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
MA 02110-1301, USA. */
#include "sysdep.h"
#include "bfd.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "opcode/ia64.h"
#include "elf/ia64.h"
#include "objalloc.h"
#include "hashtab.h"
#include "bfd_stdint.h"
#include "elfxx-ia64.h"
#define ARCH_SIZE NN
#if ARCH_SIZE == 64
#define LOG_SECTION_ALIGN 3
#endif
#if ARCH_SIZE == 32
#define LOG_SECTION_ALIGN 2
#endif
/* Type of a hash-table entry constructor, as registered with the BFD
   hash-table initialization routines.  */
typedef struct bfd_hash_entry *(*new_hash_entry_func)
  (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
/* In dynamically (linker-) created sections, we generally need to keep track
   of the place a symbol or expression got allocated to. This is done via hash
   tables that store entries of the following type.  */

struct elfNN_ia64_dyn_sym_info
{
  /* The addend for which this entry is relevant.  */
  bfd_vma addend;

  /* Offsets allocated to this symbol+addend in the various linker-created
     sections.  Which of these are meaningful is indicated by the want_*
     flags below; the *_done flags record which have been written.  */
  bfd_vma got_offset;
  bfd_vma fptr_offset;
  bfd_vma pltoff_offset;
  bfd_vma plt_offset;
  bfd_vma plt2_offset;
  bfd_vma tprel_offset;
  bfd_vma dtpmod_offset;
  bfd_vma dtprel_offset;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf_link_hash_entry *h;

  /* Used to count non-got, non-plt relocations for delayed sizing
     of relocation sections.  */
  struct elfNN_ia64_dyn_reloc_entry
  {
    /* Next entry in this symbol's list of pending dynamic relocs.  */
    struct elfNN_ia64_dyn_reloc_entry *next;
    /* Reloc section the entries will be emitted into.  */
    asection *srel;
    /* ELF relocation type.  */
    int type;
    /* Number of relocs of TYPE destined for SREL.  */
    int count;
    /* Is this reloc against readonly section? */
    bfd_boolean reltext;
  } *reloc_entries;

  /* TRUE when the section contents have been updated.  */
  unsigned got_done : 1;
  unsigned fptr_done : 1;
  unsigned pltoff_done : 1;
  unsigned tprel_done : 1;
  unsigned dtpmod_done : 1;
  unsigned dtprel_done : 1;

  /* TRUE for the different kinds of linker data we want created.  */
  unsigned want_got : 1;
  unsigned want_gotx : 1;
  unsigned want_fptr : 1;
  unsigned want_ltoff_fptr : 1;
  unsigned want_plt : 1;
  unsigned want_plt2 : 1;
  unsigned want_pltoff : 1;
  unsigned want_tprel : 1;
  unsigned want_dtpmod : 1;
  unsigned want_dtprel : 1;
};
/* Per-local-symbol dynamic information, stored in the loc_hash_table of
   the link hash table.  NOTE(review): ID and R_SYM presumably identify
   the owning bfd and the ELF symbol index — confirm against the hash
   and comparison callbacks used with loc_hash_table.  */
struct elfNN_ia64_local_hash_entry
{
  int id;
  unsigned int r_sym;
  /* The number of elements in elfNN_ia64_dyn_sym_info array. */
  unsigned int count;
  /* The number of sorted elements in elfNN_ia64_dyn_sym_info array. */
  unsigned int sorted_count;
  /* The size of elfNN_ia64_dyn_sym_info array. */
  unsigned int size;
  /* The array of elfNN_ia64_dyn_sym_info. */
  struct elfNN_ia64_dyn_sym_info *info;
  /* TRUE if this hash entry's addends was translated for
     SHF_MERGE optimization. */
  unsigned sec_merge_done : 1;
};
/* IA-64 extension of the generic ELF linker hash entry: carries a
   per-addend array of dyn_sym_info records for this global symbol.  */
struct elfNN_ia64_link_hash_entry
{
  /* Base class; must be first so pointer casts work.  */
  struct elf_link_hash_entry root;
  /* The number of elements in elfNN_ia64_dyn_sym_info array. */
  unsigned int count;
  /* The number of sorted elements in elfNN_ia64_dyn_sym_info array. */
  unsigned int sorted_count;
  /* The size of elfNN_ia64_dyn_sym_info array. */
  unsigned int size;
  /* The array of elfNN_ia64_dyn_sym_info. */
  struct elfNN_ia64_dyn_sym_info *info;
};
/* IA-64 ELF linker hash table, extending the generic ELF linker hash
   table with the extra sections and state this back end needs.  */
struct elfNN_ia64_link_hash_table
{
  /* The main hash table.  Must be first; code casts between the two.  */
  struct elf_link_hash_table root;

  asection *fptr_sec;		/* Function descriptor table (or NULL).  */
  asection *rel_fptr_sec;	/* Dynamic relocation section for same.  */
  asection *pltoff_sec;		/* Private descriptors for plt (or NULL).  */
  asection *rel_pltoff_sec;	/* Dynamic relocation section for same.  */

  bfd_size_type minplt_entries;	/* Number of minplt entries.  */
  unsigned reltext : 1;		/* Are there relocs against readonly sections?  */
  unsigned self_dtpmod_done : 1;/* Has self DTPMOD entry been finished?  */
  bfd_vma self_dtpmod_offset;	/* .got offset to self DTPMOD entry.  */
  /* There are maybe R_IA64_GPREL22 relocations, including those
     optimized from R_IA64_LTOFF22X, against non-SHF_IA_64_SHORT
     sections. We need to record those sections so that we can choose
     a proper GP to cover all R_IA64_GPREL22 relocations. */
  asection *max_short_sec;	/* Maximum short output section.  */
  bfd_vma max_short_offset;	/* Maximum short offset.  */
  asection *min_short_sec;	/* Minimum short output section.  */
  bfd_vma min_short_offset;	/* Minimum short offset.  */

  /* Hash table of elfNN_ia64_local_hash_entry, plus the objalloc
     backing its storage.  */
  htab_t loc_hash_table;
  void *loc_hash_memory;
};
/* Closure passed to the dyn-sym traversal callbacks when sizing the
   linker-created sections (see the allocate_* functions).  */
struct elfNN_ia64_allocate_data
{
  struct bfd_link_info *info;
  /* Running offset: the next free position in the section being sized.  */
  bfd_size_type ofs;
  /* When TRUE, only account for .got-related dynamic relocs.  */
  bfd_boolean only_got;
};
/* Fetch the IA-64 back-end hash table from link info P, or NULL if the
   hash table in use does not belong to this back end.  */
#define elfNN_ia64_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == IA64_ELF_DATA ? ((struct elfNN_ia64_link_hash_table *) ((p)->hash)) : NULL)
/* Forward declarations for routines defined later in this file.  */

static struct elfNN_ia64_dyn_sym_info * get_dyn_sym_info
  (struct elfNN_ia64_link_hash_table *ia64_info,
   struct elf_link_hash_entry *h,
   bfd *abfd, const Elf_Internal_Rela *rel, bfd_boolean create);
static bfd_boolean elfNN_ia64_dynamic_symbol_p
  (struct elf_link_hash_entry *h, struct bfd_link_info *info, int);
static bfd_boolean elfNN_ia64_choose_gp
  (bfd *abfd, struct bfd_link_info *info, bfd_boolean final);
static void elfNN_ia64_dyn_sym_traverse
  (struct elfNN_ia64_link_hash_table *ia64_info,
   bfd_boolean (*func) (struct elfNN_ia64_dyn_sym_info *, void *),
   void * info);
static bfd_boolean allocate_global_data_got
  (struct elfNN_ia64_dyn_sym_info *dyn_i, void * data);
static bfd_boolean allocate_global_fptr_got
  (struct elfNN_ia64_dyn_sym_info *dyn_i, void * data);
static bfd_boolean allocate_local_got
  (struct elfNN_ia64_dyn_sym_info *dyn_i, void * data);
static bfd_boolean elfNN_ia64_hpux_vec
  (const bfd_target *vec);
static bfd_boolean allocate_dynrel_entries
  (struct elfNN_ia64_dyn_sym_info *dyn_i, void * data);
static asection *get_pltoff
  (bfd *abfd, struct bfd_link_info *info,
   struct elfNN_ia64_link_hash_table *ia64_info);
/* ia64-specific relocation.  */

/* Fill in the howto pointer of BFD_RELOC from the relocation type
   encoded in the r_info field of ELF_RELOC.  */

static void
elfNN_ia64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
			  arelent *bfd_reloc,
			  Elf_Internal_Rela *elf_reloc)
{
  unsigned int r_type = (unsigned int) ELFNN_R_TYPE (elf_reloc->r_info);

  bfd_reloc->howto = ia64_elf_lookup_howto (r_type);
}
#define PLT_HEADER_SIZE		(3 * 16)
#define PLT_MIN_ENTRY_SIZE	(1 * 16)
#define PLT_FULL_ENTRY_SIZE	(2 * 16)
#define PLT_RESERVED_WORDS	3

/* Instruction templates for the PLT and for out-of-range branch
   trampolines.  The zero immediates are patched when the final
   addresses are known (see e.g. elfNN_ia64_relax_section, which
   copies plt_full_entry / oor_brl / oor_ip and installs relocs
   against them).  */

static const bfd_byte plt_header[PLT_HEADER_SIZE] =
{
  0x0b, 0x10, 0x00, 0x1c, 0x00, 0x21,  /*   [MMI]       mov r2=r14;;       */
  0xe0, 0x00, 0x08, 0x00, 0x48, 0x00,  /*               addl r14=0,r2      */
  0x00, 0x00, 0x04, 0x00,              /*               nop.i 0x0;;        */
  0x0b, 0x80, 0x20, 0x1c, 0x18, 0x14,  /*   [MMI]       ld8 r16=[r14],8;;  */
  0x10, 0x41, 0x38, 0x30, 0x28, 0x00,  /*               ld8 r17=[r14],8    */
  0x00, 0x00, 0x04, 0x00,              /*               nop.i 0x0;;        */
  0x11, 0x08, 0x00, 0x1c, 0x18, 0x10,  /*   [MIB]       ld8 r1=[r14]       */
  0x60, 0x88, 0x04, 0x80, 0x03, 0x00,  /*               mov b6=r17         */
  0x60, 0x00, 0x80, 0x00               /*               br.few b6;;        */
};

static const bfd_byte plt_min_entry[PLT_MIN_ENTRY_SIZE] =
{
  0x11, 0x78, 0x00, 0x00, 0x00, 0x24,  /*   [MIB]       mov r15=0          */
  0x00, 0x00, 0x00, 0x02, 0x00, 0x00,  /*               nop.i 0x0          */
  0x00, 0x00, 0x00, 0x40               /*               br.few 0 <PLT0>;;  */
};

static const bfd_byte plt_full_entry[PLT_FULL_ENTRY_SIZE] =
{
  0x0b, 0x78, 0x00, 0x02, 0x00, 0x24,  /*   [MMI]       addl r15=0,r1;;    */
  0x00, 0x41, 0x3c, 0x70, 0x29, 0xc0,  /*               ld8.acq r16=[r15],8*/
  0x01, 0x08, 0x00, 0x84,              /*               mov r14=r1;;       */
  0x11, 0x08, 0x00, 0x1e, 0x18, 0x10,  /*   [MIB]       ld8 r1=[r15]       */
  0x60, 0x80, 0x04, 0x80, 0x03, 0x00,  /*               mov b6=r16         */
  0x60, 0x00, 0x80, 0x00               /*               br.few b6;;        */
};

#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"

/* Out-of-range branch trampoline using a long branch (brl).  */
static const bfd_byte oor_brl[16] =
{
  0x05, 0x00, 0x00, 0x00, 0x01, 0x00,  /*  [MLX]        nop.m 0            */
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  /*               brl.sptk.few tgt;; */
  0x00, 0x00, 0x00, 0xc0
};

/* Out-of-range branch trampoline for processors without brl: compute
   the target from ip and branch indirectly through b6.  */
static const bfd_byte oor_ip[48] =
{
  0x04, 0x00, 0x00, 0x00, 0x01, 0x00,  /*  [MLX]        nop.m 0            */
  0x00, 0x00, 0x00, 0x00, 0x00, 0xe0,  /*               movl r15=0         */
  0x01, 0x00, 0x00, 0x60,
  0x03, 0x00, 0x00, 0x00, 0x01, 0x00,  /*  [MII]        nop.m 0            */
  0x00, 0x01, 0x00, 0x60, 0x00, 0x00,  /*               mov r16=ip;;       */
  0xf2, 0x80, 0x00, 0x80,              /*               add r16=r15,r16;;  */
  0x11, 0x00, 0x00, 0x00, 0x01, 0x00,  /*  [MIB]        nop.m 0            */
  0x60, 0x80, 0x04, 0x80, 0x03, 0x00,  /*               mov b6=r16         */
  0x60, 0x00, 0x80, 0x00               /*               br b6;;            */
};

/* Size of the trampoline template in use; selected by
   bfd_elfNN_ia64_after_parse.  */
static size_t oor_branch_size = sizeof (oor_brl);
/* Select the out-of-range branch trampoline template: the ip-relative
   sequence for original Itanium (which lacks brl), the brl form
   otherwise.  */

void
bfd_elfNN_ia64_after_parse (int itanium)
{
  if (itanium)
    oor_branch_size = sizeof (oor_ip);
  else
    oor_branch_size = sizeof (oor_brl);
}
/* Rename some of the generic section flags to better document how they
   are used here.  */
/* True if relax pass 0 (br relaxation) can be skipped for a section.  */
#define skip_relax_pass_0 sec_flg0
/* True if relax pass 1 (brl/ldxmov relaxation) can be skipped.  */
#define skip_relax_pass_1 sec_flg1

/* These functions do relaxation for IA-64 ELF.  */

/* Widen the recorded [min, max] range of output sections/offsets that
   are targets of R_IA64_GPREL22 relocations, so that a GP covering all
   of them can be chosen later (see elfNN_ia64_choose_gp).  SEC is an
   output section; OFFSET is the reloc target's offset within it.  */

static void
elfNN_ia64_update_short_info (asection *sec, bfd_vma offset,
			      struct elfNN_ia64_link_hash_table *ia64_info)
{
  /* Skip ABS and SHF_IA_64_SHORT sections.  */
  if (sec == bfd_abs_section_ptr
      || (sec->flags & SEC_SMALL_DATA) != 0)
    return;

  if (!ia64_info->min_short_sec)
    {
      /* First reloc seen: it is both the minimum and the maximum.  */
      ia64_info->max_short_sec = sec;
      ia64_info->max_short_offset = offset;
      ia64_info->min_short_sec = sec;
      ia64_info->min_short_offset = offset;
    }
  else if (sec == ia64_info->max_short_sec
	   && offset > ia64_info->max_short_offset)
    ia64_info->max_short_offset = offset;
  else if (sec == ia64_info->min_short_sec
	   && offset < ia64_info->min_short_offset)
    ia64_info->min_short_offset = offset;
  /* NOTE(review): the comparisons below use max/min_short_sec->vma
     rather than ->output_section->vma; this is equivalent only because
     callers always pass output sections here (an output section's
     output_section points to itself) — confirm if a new caller is
     added.  */
  else if (sec->output_section->vma
	   > ia64_info->max_short_sec->vma)
    {
      ia64_info->max_short_sec = sec;
      ia64_info->max_short_offset = offset;
    }
  else if (sec->output_section->vma
	   < ia64_info->min_short_sec->vma)
    {
      ia64_info->min_short_sec = sec;
      ia64_info->min_short_offset = offset;
    }
}
/* Relax SEC of ABFD.  In relax pass 0 out-of-range short branches
   (PCREL21x) are redirected either to a brl or to a trampoline
   appended at the end of the section; in pass 1, brl instructions
   that turn out to be in 21-bit range are shrunk back to br, and
   LTOFF22X/LDXMOV sequences are optimized to GP-relative forms.
   Sets *AGAIN when contents or relocs changed so the linker runs
   another iteration.  Returns FALSE on error.

   Fixes relative to the previous version: the bfd_malloc result for a
   fixup record is now checked before use, and a failed bfd_realloc of
   the section contents no longer leaks the original buffer.  */

static bfd_boolean
elfNN_ia64_relax_section (bfd *abfd, asection *sec,
			  struct bfd_link_info *link_info,
			  bfd_boolean *again)
{
  struct one_fixup
  {
    struct one_fixup *next;
    asection *tsec;
    bfd_vma toff;
    bfd_vma trampoff;
  };

  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents;
  Elf_Internal_Sym *isymbuf = NULL;
  struct elfNN_ia64_link_hash_table *ia64_info;
  struct one_fixup *fixups = NULL;
  bfd_boolean changed_contents = FALSE;
  bfd_boolean changed_relocs = FALSE;
  bfd_boolean changed_got = FALSE;
  /* These locals deliberately share names with the section-flag macros
     above; they cache whether each pass found any work to do.  */
  bfd_boolean skip_relax_pass_0 = TRUE;
  bfd_boolean skip_relax_pass_1 = TRUE;
  bfd_vma gp = 0;

  /* Assume we're not going to change any sizes, and we'll only need
     one pass.  */
  *again = FALSE;

  if (bfd_link_relocatable (link_info))
    (*link_info->callbacks->einfo)
      (_("%P%F: --relax and -r may not be used together\n"));

  /* Don't even try to relax for non-ELF outputs.  */
  if (!is_elf_hash_table (link_info->hash))
    return FALSE;

  /* Nothing to do if there are no relocations or there is no need for
     the current pass.  */
  if ((sec->flags & SEC_RELOC) == 0
      || sec->reloc_count == 0
      || (link_info->relax_pass == 0 && sec->skip_relax_pass_0)
      || (link_info->relax_pass == 1 && sec->skip_relax_pass_1))
    return TRUE;

  ia64_info = elfNN_ia64_hash_table (link_info);
  if (ia64_info == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;

  /* Load the relocations for this section.  */
  internal_relocs = (_bfd_elf_link_read_relocs
		     (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
		      link_info->keep_memory));
  if (internal_relocs == NULL)
    return FALSE;

  irelend = internal_relocs + sec->reloc_count;

  /* Get the section contents.  */
  if (elf_section_data (sec)->this_hdr.contents != NULL)
    contents = elf_section_data (sec)->this_hdr.contents;
  else
    {
      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;
    }

  for (irel = internal_relocs; irel < irelend; irel++)
    {
      unsigned long r_type = ELFNN_R_TYPE (irel->r_info);
      bfd_vma symaddr, reladdr, trampoff, toff, roff;
      asection *tsec;
      struct one_fixup *f;
      bfd_size_type amt;
      bfd_boolean is_branch;
      struct elfNN_ia64_dyn_sym_info *dyn_i;
      char symtype;

      switch (r_type)
	{
	case R_IA64_PCREL21B:
	case R_IA64_PCREL21BI:
	case R_IA64_PCREL21M:
	case R_IA64_PCREL21F:
	  /* In pass 1, all br relaxations are done. We can skip it. */
	  if (link_info->relax_pass == 1)
	    continue;
	  skip_relax_pass_0 = FALSE;
	  is_branch = TRUE;
	  break;

	case R_IA64_PCREL60B:
	  /* We can't optimize brl to br in pass 0 since br relaxations
	     will increase the code size. Defer it to pass 1.  */
	  if (link_info->relax_pass == 0)
	    {
	      skip_relax_pass_1 = FALSE;
	      continue;
	    }
	  is_branch = TRUE;
	  break;

	case R_IA64_GPREL22:
	  /* Update max_short_sec/min_short_sec.  */

	case R_IA64_LTOFF22X:
	case R_IA64_LDXMOV:
	  /* We can't relax ldx/mov in pass 0 since br relaxations will
	     increase the code size. Defer it to pass 1.  */
	  if (link_info->relax_pass == 0)
	    {
	      skip_relax_pass_1 = FALSE;
	      continue;
	    }
	  is_branch = FALSE;
	  break;

	default:
	  continue;
	}

      /* Get the value of the symbol referred to by the reloc.  */
      if (ELFNN_R_SYM (irel->r_info) < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  Elf_Internal_Sym *isym;

	  /* Read this BFD's local symbols.  */
	  if (isymbuf == NULL)
	    {
	      isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
	      if (isymbuf == NULL)
		isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
						symtab_hdr->sh_info, 0,
						NULL, NULL, NULL);
	      if (isymbuf == NULL)
		goto error_return;
	    }

	  isym = isymbuf + ELFNN_R_SYM (irel->r_info);
	  if (isym->st_shndx == SHN_UNDEF)
	    continue;	/* We can't do anything with undefined symbols.  */
	  else if (isym->st_shndx == SHN_ABS)
	    tsec = bfd_abs_section_ptr;
	  else if (isym->st_shndx == SHN_COMMON)
	    tsec = bfd_com_section_ptr;
	  else if (isym->st_shndx == SHN_IA_64_ANSI_COMMON)
	    tsec = bfd_com_section_ptr;
	  else
	    tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);

	  toff = isym->st_value;
	  dyn_i = get_dyn_sym_info (ia64_info, NULL, abfd, irel, FALSE);
	  symtype = ELF_ST_TYPE (isym->st_info);
	}
      else
	{
	  unsigned long indx;
	  struct elf_link_hash_entry *h;

	  indx = ELFNN_R_SYM (irel->r_info) - symtab_hdr->sh_info;
	  h = elf_sym_hashes (abfd)[indx];
	  BFD_ASSERT (h != NULL);

	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  dyn_i = get_dyn_sym_info (ia64_info, h, abfd, irel, FALSE);

	  /* For branches to dynamic symbols, we're interested instead
	     in a branch to the PLT entry.  */
	  if (is_branch && dyn_i && dyn_i->want_plt2)
	    {
	      /* Internal branches shouldn't be sent to the PLT.
		 Leave this for now and we'll give an error later.  */
	      if (r_type != R_IA64_PCREL21B)
		continue;

	      tsec = ia64_info->root.splt;
	      toff = dyn_i->plt2_offset;
	      BFD_ASSERT (irel->r_addend == 0);
	    }

	  /* Can't do anything else with dynamic symbols.  */
	  else if (elfNN_ia64_dynamic_symbol_p (h, link_info, r_type))
	    continue;

	  else
	    {
	      /* We can't do anything with undefined symbols.  */
	      if (h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak)
		continue;

	      tsec = h->root.u.def.section;
	      toff = h->root.u.def.value;
	    }

	  symtype = h->type;
	}

      if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
	{
	  /* At this stage in linking, no SEC_MERGE symbol has been
	     adjusted, so all references to such symbols need to be
	     passed through _bfd_merged_section_offset.  (Later, in
	     relocate_section, all SEC_MERGE symbols *except* for
	     section symbols have been adjusted.)

	     gas may reduce relocations against symbols in SEC_MERGE
	     sections to a relocation against the section symbol when
	     the original addend was zero.  When the reloc is against
	     a section symbol we should include the addend in the
	     offset passed to _bfd_merged_section_offset, since the
	     location of interest is the original symbol.  On the
	     other hand, an access to "sym+addend" where "sym" is not
	     a section symbol should not include the addend;  Such an
	     access is presumed to be an offset from "sym";  The
	     location of interest is just "sym".  */
	  if (symtype == STT_SECTION)
	    toff += irel->r_addend;

	  toff = _bfd_merged_section_offset (abfd, &tsec,
					     elf_section_data (tsec)->sec_info,
					     toff);

	  if (symtype != STT_SECTION)
	    toff += irel->r_addend;
	}
      else
	toff += irel->r_addend;

      symaddr = tsec->output_section->vma + tsec->output_offset + toff;

      roff = irel->r_offset;

      if (is_branch)
	{
	  bfd_signed_vma offset;

	  reladdr = (sec->output_section->vma
		     + sec->output_offset
		     + roff) & (bfd_vma) -4;

	  /* The .plt section is aligned at 32byte and the .text section
	     is aligned at 64byte. The .text section is right after the
	     .plt section.  After the first relaxation pass, linker may
	     increase the gap between the .plt and .text sections up
	     to 32byte.  We assume linker will always insert 32byte
	     between the .plt and .text sections after the first
	     relaxation pass.  */
	  if (tsec == ia64_info->root.splt)
	    offset = -0x1000000 + 32;
	  else
	    offset = -0x1000000;

	  /* If the branch is in range, no need to do anything.  */
	  if ((bfd_signed_vma) (symaddr - reladdr) >= offset
	      && (bfd_signed_vma) (symaddr - reladdr) <= 0x0FFFFF0)
	    {
	      /* If the 60-bit branch is in 21-bit range, optimize it. */
	      if (r_type == R_IA64_PCREL60B)
		{
		  ia64_elf_relax_brl (contents, roff);

		  irel->r_info
		    = ELFNN_R_INFO (ELFNN_R_SYM (irel->r_info),
				    R_IA64_PCREL21B);

		  /* If the original relocation offset points to slot
		     1, change it to slot 2.  */
		  if ((irel->r_offset & 3) == 1)
		    irel->r_offset += 1;
		}

	      continue;
	    }
	  else if (r_type == R_IA64_PCREL60B)
	    continue;
	  else if (ia64_elf_relax_br (contents, roff))
	    {
	      irel->r_info
		= ELFNN_R_INFO (ELFNN_R_SYM (irel->r_info),
				R_IA64_PCREL60B);

	      /* Make the relocation offset point to slot 1.  */
	      irel->r_offset = (irel->r_offset & ~((bfd_vma) 0x3)) + 1;
	      continue;
	    }

	  /* We can't put a trampoline in a .init/.fini section. Issue
	     an error.  */
	  if (strcmp (sec->output_section->name, ".init") == 0
	      || strcmp (sec->output_section->name, ".fini") == 0)
	    {
	      (*_bfd_error_handler)
		(_("%B: Can't relax br at 0x%lx in section `%A'. Please use brl or indirect branch."),
		 sec->owner, sec, (unsigned long) roff);
	      bfd_set_error (bfd_error_bad_value);
	      goto error_return;
	    }

	  /* If the branch and target are in the same section, you've
	     got one honking big section and we can't help you unless
	     you are branching backwards.  You'll get an error message
	     later.  */
	  if (tsec == sec && toff > roff)
	    continue;

	  /* Look for an existing fixup to this address.  */
	  for (f = fixups; f ; f = f->next)
	    if (f->tsec == tsec && f->toff == toff)
	      break;

	  if (f == NULL)
	    {
	      /* Two alternatives: If it's a branch to a PLT entry, we can
		 make a copy of the FULL_PLT entry.  Otherwise, we'll have
		 to use a `brl' insn to get where we're going.  */

	      size_t size;
	      bfd_byte *new_contents;

	      if (tsec == ia64_info->root.splt)
		size = sizeof (plt_full_entry);
	      else
		size = oor_branch_size;

	      /* Resize the current section to make room for the new branch. */
	      trampoff = (sec->size + 15) & (bfd_vma) -16;

	      /* If trampoline is out of range, there is nothing we
		 can do.  */
	      offset = trampoff - (roff & (bfd_vma) -4);
	      if (offset < -0x1000000 || offset > 0x0FFFFF0)
		continue;

	      amt = trampoff + size;
	      /* Use a temporary so a failed realloc doesn't lose (and
		 leak) the original buffer; the error path still frees
		 CONTENTS when appropriate.  */
	      new_contents = (bfd_byte *) bfd_realloc (contents, amt);
	      if (new_contents == NULL)
		goto error_return;
	      contents = new_contents;
	      sec->size = amt;

	      if (tsec == ia64_info->root.splt)
		{
		  memcpy (contents + trampoff, plt_full_entry, size);

		  /* Hijack the old relocation for use as the PLTOFF reloc.  */
		  irel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (irel->r_info),
					       R_IA64_PLTOFF22);
		  irel->r_offset = trampoff;
		}
	      else
		{
		  if (size == sizeof (oor_ip))
		    {
		      memcpy (contents + trampoff, oor_ip, size);
		      irel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (irel->r_info),
						   R_IA64_PCREL64I);
		      irel->r_addend -= 16;
		      irel->r_offset = trampoff + 2;
		    }
		  else
		    {
		      memcpy (contents + trampoff, oor_brl, size);
		      irel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (irel->r_info),
						   R_IA64_PCREL60B);
		      irel->r_offset = trampoff + 2;
		    }
		}

	      /* Record the fixup so we don't do it again this section.  */
	      f = (struct one_fixup *)
		bfd_malloc ((bfd_size_type) sizeof (*f));
	      /* bfd_malloc can fail; previously the result was used
		 unchecked.  */
	      if (f == NULL)
		goto error_return;
	      f->next = fixups;
	      f->tsec = tsec;
	      f->toff = toff;
	      f->trampoff = trampoff;
	      fixups = f;
	    }
	  else
	    {
	      /* If trampoline is out of range, there is nothing we
		 can do.  */
	      offset = f->trampoff - (roff & (bfd_vma) -4);
	      if (offset < -0x1000000 || offset > 0x0FFFFF0)
		continue;

	      /* Nop out the reloc, since we're finalizing things here.  */
	      irel->r_info = ELFNN_R_INFO (0, R_IA64_NONE);
	    }

	  /* Fix up the existing branch to hit the trampoline.  */
	  if (ia64_elf_install_value (contents + roff, offset, r_type)
	      != bfd_reloc_ok)
	    goto error_return;

	  changed_contents = TRUE;
	  changed_relocs = TRUE;
	}
      else
	{
	  /* Fetch the gp.  */
	  if (gp == 0)
	    {
	      bfd *obfd = sec->output_section->owner;
	      gp = _bfd_get_gp_value (obfd);
	      if (gp == 0)
		{
		  if (!elfNN_ia64_choose_gp (obfd, link_info, FALSE))
		    goto error_return;
		  gp = _bfd_get_gp_value (obfd);
		}
	    }

	  /* If the data is out of range, do nothing.  */
	  if ((bfd_signed_vma) (symaddr - gp) >= 0x200000
	      || (bfd_signed_vma) (symaddr - gp) < -0x200000)
	    continue;

	  if (r_type == R_IA64_GPREL22)
	    elfNN_ia64_update_short_info (tsec->output_section,
					  tsec->output_offset + toff,
					  ia64_info);
	  else if (r_type == R_IA64_LTOFF22X)
	    {
	      irel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (irel->r_info),
					   R_IA64_GPREL22);
	      changed_relocs = TRUE;
	      if (dyn_i->want_gotx)
		{
		  dyn_i->want_gotx = 0;
		  changed_got |= !dyn_i->want_got;
		}

	      elfNN_ia64_update_short_info (tsec->output_section,
					    tsec->output_offset + toff,
					    ia64_info);
	    }
	  else
	    {
	      ia64_elf_relax_ldxmov (contents, roff);
	      irel->r_info = ELFNN_R_INFO (0, R_IA64_NONE);
	      changed_contents = TRUE;
	      changed_relocs = TRUE;
	    }
	}
    }

  /* ??? If we created fixups, this may push the code segment large
     enough that the data segment moves, which will change the GP.
     Reset the GP so that we re-calculate next round.  We need to
     do this at the _beginning_ of the next round; now will not do.  */

  /* Clean up and go home.  */
  while (fixups)
    {
      struct one_fixup *f = fixups;
      fixups = fixups->next;
      free (f);
    }

  if (isymbuf != NULL
      && symtab_hdr->contents != (unsigned char *) isymbuf)
    {
      if (! link_info->keep_memory)
	free (isymbuf);
      else
	{
	  /* Cache the symbols for elf_link_input_bfd.  */
	  symtab_hdr->contents = (unsigned char *) isymbuf;
	}
    }

  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    {
      if (!changed_contents && !link_info->keep_memory)
	free (contents);
      else
	{
	  /* Cache the section contents for elf_link_input_bfd.  */
	  elf_section_data (sec)->this_hdr.contents = contents;
	}
    }

  if (elf_section_data (sec)->relocs != internal_relocs)
    {
      if (!changed_relocs)
	free (internal_relocs);
      else
	elf_section_data (sec)->relocs = internal_relocs;
    }

  if (changed_got)
    {
      struct elfNN_ia64_allocate_data data;
      data.info = link_info;
      data.ofs = 0;
      ia64_info->self_dtpmod_offset = (bfd_vma) -1;

      elfNN_ia64_dyn_sym_traverse (ia64_info, allocate_global_data_got, &data);
      elfNN_ia64_dyn_sym_traverse (ia64_info, allocate_global_fptr_got, &data);
      elfNN_ia64_dyn_sym_traverse (ia64_info, allocate_local_got, &data);
      ia64_info->root.sgot->size = data.ofs;

      if (ia64_info->root.dynamic_sections_created
	  && ia64_info->root.srelgot != NULL)
	{
	  /* Resize .rela.got.  */
	  ia64_info->root.srelgot->size = 0;
	  if (bfd_link_pic (link_info)
	      && ia64_info->self_dtpmod_offset != (bfd_vma) -1)
	    ia64_info->root.srelgot->size += sizeof (ElfNN_External_Rela);
	  data.only_got = TRUE;
	  elfNN_ia64_dyn_sym_traverse (ia64_info, allocate_dynrel_entries,
				       &data);
	}
    }

  if (link_info->relax_pass == 0)
    {
      /* Pass 0 is only needed to relax br.  */
      sec->skip_relax_pass_0 = skip_relax_pass_0;
      sec->skip_relax_pass_1 = skip_relax_pass_1;
    }

  *again = changed_contents || changed_relocs;
  return TRUE;

 error_return:
  if (isymbuf != NULL && (unsigned char *) isymbuf != symtab_hdr->contents)
    free (isymbuf);
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);
  return FALSE;
}
#undef skip_relax_pass_0
#undef skip_relax_pass_1
/* Return TRUE if NAME is an unwind table section name.  */

static inline bfd_boolean
is_unwind_section_name (bfd *abfd, const char *name)
{
  /* On HP-UX the unwind-header section is not treated as unwind.  */
  if (elfNN_ia64_hpux_vec (abfd->xvec)
      && strcmp (name, ELF_STRING_ia64_unwind_hdr) == 0)
    return FALSE;

  if (CONST_STRNEQ (name, ELF_STRING_ia64_unwind_once))
    return TRUE;

  /* Plain unwind sections, excluding the unwind-info variant whose
     name shares the same prefix.  */
  return (CONST_STRNEQ (name, ELF_STRING_ia64_unwind)
	  && !CONST_STRNEQ (name, ELF_STRING_ia64_unwind_info));
}
/* Handle an IA-64 specific section when reading an object file.  This
   is called when bfd_section_from_shdr finds a section with an unknown
   type.  There is no ELF-backend-specific flag storage, so sections
   are recognized by type and (for SHT_IA_64_EXT) by name.  */

static bfd_boolean
elfNN_ia64_section_from_shdr (bfd *abfd,
			      Elf_Internal_Shdr *hdr,
			      const char *name,
			      int shindex)
{
  switch (hdr->sh_type)
    {
    case SHT_IA_64_UNWIND:
    case SHT_IA_64_HP_OPT_ANOT:
      break;

    case SHT_IA_64_EXT:
      /* Only the canonical arch-extension section name is accepted.  */
      if (strcmp (name, ELF_STRING_ia64_archext) != 0)
	return FALSE;
      break;

    default:
      return FALSE;
    }

  return _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex);
}
/* Convert IA-64 specific section flags to bfd internal section flags.  */

/* ??? There is no bfd internal flag equivalent to the SHF_IA_64_NORECOV
   flag.  */

static bfd_boolean
elfNN_ia64_section_flags (flagword *flags,
			  const Elf_Internal_Shdr *hdr)
{
  if ((hdr->sh_flags & SHF_IA_64_SHORT) != 0)
    *flags |= SEC_SMALL_DATA;

  return TRUE;
}
/* Set the correct type for an IA-64 ELF section.  We do this by the
   section name, which is a hack, but ought to work.  */

static bfd_boolean
elfNN_ia64_fake_sections (bfd *abfd, Elf_Internal_Shdr *hdr,
			  asection *sec)
{
  const char *name;

  name = bfd_get_section_name (abfd, sec);

  if (is_unwind_section_name (abfd, name))
    {
      /* We don't have the sections numbered at this point, so sh_info
	 is set later, in elfNN_ia64_final_write_processing.  */
      hdr->sh_type = SHT_IA_64_UNWIND;
      hdr->sh_flags |= SHF_LINK_ORDER;
    }
  else if (strcmp (name, ELF_STRING_ia64_archext) == 0)
    hdr->sh_type = SHT_IA_64_EXT;
  else if (strcmp (name, ".HP.opt_annot") == 0)
    hdr->sh_type = SHT_IA_64_HP_OPT_ANOT;
  else if (strcmp (name, ".reloc") == 0)
    /* This is an ugly, but unfortunately necessary hack that is
       needed when producing EFI binaries on IA-64. It tells
       elf.c:elf_fake_sections() not to consider ".reloc" as a section
       containing ELF relocation info.  We need this hack in order to
       be able to generate ELF binaries that can be translated into
       EFI applications (which are essentially COFF objects).  Those
       files contain a COFF ".reloc" section inside an ELFNN object,
       which would normally cause BFD to segfault because it would
       attempt to interpret this section as containing relocation
       entries for section "oc".  With this hack enabled, ".reloc"
       will be treated as a normal data section, which will avoid the
       segfault.  However, you won't be able to create an ELFNN binary
       with a section named "oc" that needs relocations, but that's
       the kind of ugly side-effects you get when detecting section
       types based on their names... In practice, this limitation is
       unlikely to bite.  */
    hdr->sh_type = SHT_PROGBITS;

  if (sec->flags & SEC_SMALL_DATA)
    hdr->sh_flags |= SHF_IA_64_SHORT;

  /* Some HP linkers look for the SHF_IA_64_HP_TLS flag instead of SHF_TLS.
     sec->flags holds BFD SEC_* bits, so test SEC_THREAD_LOCAL here; the
     previous code tested the ELF flag SHF_TLS against it, which belongs
     to a different flag namespace and matched the wrong bit.  */
  if (elfNN_ia64_hpux_vec (abfd->xvec) && (sec->flags & SEC_THREAD_LOCAL))
    hdr->sh_flags |= SHF_IA_64_HP_TLS;

  return TRUE;
}
/* The final processing done just before writing out an IA-64 ELF
   object file.  */

static void
elfNN_ia64_final_write_processing (bfd *abfd,
				   bfd_boolean linker ATTRIBUTE_UNUSED)
{
  asection *s;

  for (s = abfd->sections; s != NULL; s = s->next)
    {
      Elf_Internal_Shdr *hdr = &elf_section_data (s)->this_hdr;

      /* The IA-64 processor-specific ABI requires setting sh_link
	 to the unwind section, whereas HP-UX requires sh_info to
	 do so.  For maximum compatibility, we'll set both for
	 now... */
      if (hdr->sh_type == SHT_IA_64_UNWIND)
	hdr->sh_info = hdr->sh_link;
    }

  if (!elf_flags_init (abfd))
    {
      unsigned long flags = 0;

      if (abfd->xvec->byteorder == BFD_ENDIAN_BIG)
	flags |= EF_IA_64_BE;
      if (bfd_get_mach (abfd) == bfd_mach_ia64_elf64)
	flags |= EF_IA_64_ABI64;

      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }
}
/* Hook called by the linker routine which adds symbols from an object
   file.  We use it to put .comm items in .sbss, and not .bss.  */

static bfd_boolean
elfNN_ia64_add_symbol_hook (bfd *abfd,
			    struct bfd_link_info *info,
			    Elf_Internal_Sym *sym,
			    const char **namep ATTRIBUTE_UNUSED,
			    flagword *flagsp ATTRIBUTE_UNUSED,
			    asection **secp,
			    bfd_vma *valp)
{
  asection *scomm;

  /* Only final-link common symbols no larger than -G nn qualify.  */
  if (sym->st_shndx != SHN_COMMON
      || bfd_link_relocatable (info)
      || sym->st_size > elf_gp_size (abfd))
    return TRUE;

  /* Common symbols less than or equal to -G nn bytes are
     automatically put into .sbss; create .scommon on first use.  */
  scomm = bfd_get_section_by_name (abfd, ".scommon");
  if (scomm == NULL)
    {
      scomm = bfd_make_section_with_flags (abfd, ".scommon",
					   (SEC_ALLOC
					    | SEC_IS_COMMON
					    | SEC_LINKER_CREATED));
      if (scomm == NULL)
	return FALSE;
    }

  *secp = scomm;
  *valp = sym->st_size;

  return TRUE;
}
/* Return the number of additional phdrs we will need.  */

static int
elfNN_ia64_additional_program_headers (bfd *abfd,
				       struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *s;
  int count = 0;

  /* A loaded arch-extension section requires a PT_IA_64_ARCHEXT
     segment.  */
  s = bfd_get_section_by_name (abfd, ELF_STRING_ia64_archext);
  if (s != NULL && (s->flags & SEC_LOAD) != 0)
    count += 1;

  /* One PT_IA_64_UNWIND segment per loaded unwind section.  */
  for (s = abfd->sections; s != NULL; s = s->next)
    {
      if ((s->flags & SEC_LOAD) != 0
	  && is_unwind_section_name (abfd, s->name))
	count += 1;
    }

  return count;
}
/* Adjust the segment map: ensure a PT_IA_64_ARCHEXT segment exists (and
   precedes all PT_LOAD segments) when an arch-extension section is
   loaded, and install a PT_IA_64_UNWIND segment for each loaded unwind
   section that isn't already covered by one.  */

static bfd_boolean
elfNN_ia64_modify_segment_map (bfd *abfd,
			       struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m, **pm;
  Elf_Internal_Shdr *hdr;
  asection *s;

  /* If we need a PT_IA_64_ARCHEXT segment, it must come before
     all PT_LOAD segments.  */
  s = bfd_get_section_by_name (abfd, ELF_STRING_ia64_archext);
  if (s && (s->flags & SEC_LOAD))
    {
      for (m = elf_seg_map (abfd); m != NULL; m = m->next)
	if (m->p_type == PT_IA_64_ARCHEXT)
	  break;
      if (m == NULL)
	{
	  m = ((struct elf_segment_map *)
	       bfd_zalloc (abfd, (bfd_size_type) sizeof *m));
	  if (m == NULL)
	    return FALSE;

	  m->p_type = PT_IA_64_ARCHEXT;
	  m->count = 1;
	  m->sections[0] = s;

	  /* We want to put it after the PHDR and INTERP segments.  */
	  pm = &elf_seg_map (abfd);
	  while (*pm != NULL
		 && ((*pm)->p_type == PT_PHDR
		     || (*pm)->p_type == PT_INTERP))
	    pm = &(*pm)->next;

	  /* Splice the new map entry in at *PM.  */
	  m->next = *pm;
	  *pm = m;
	}
    }

  /* Install PT_IA_64_UNWIND segments, if needed.  */
  for (s = abfd->sections; s; s = s->next)
    {
      hdr = &elf_section_data (s)->this_hdr;
      if (hdr->sh_type != SHT_IA_64_UNWIND)
	continue;

      if (s && (s->flags & SEC_LOAD))
	{
	  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
	    if (m->p_type == PT_IA_64_UNWIND)
	      {
		int i;

		/* Look through all sections in the unwind segment
		   for a match since there may be multiple sections
		   to a segment.  */
		for (i = m->count - 1; i >= 0; --i)
		  if (m->sections[i] == s)
		    break;

		if (i >= 0)
		  break;
	      }

	  if (m == NULL)
	    {
	      m = ((struct elf_segment_map *)
		   bfd_zalloc (abfd, (bfd_size_type) sizeof *m));
	      if (m == NULL)
		return FALSE;

	      m->p_type = PT_IA_64_UNWIND;
	      m->count = 1;
	      m->sections[0] = s;
	      m->next = NULL;

	      /* We want to put it last.  */
	      pm = &elf_seg_map (abfd);
	      while (*pm != NULL)
		pm = &(*pm)->next;
	      *pm = m;
	    }
	}
    }

  return TRUE;
}
/* Turn on PF_IA_64_NORECOV if needed.  This involves traversing all of
   the input sections for each output section in the segment and testing
   for SHF_IA_64_NORECOV on each.  */

static bfd_boolean
elfNN_ia64_modify_program_headers (bfd *abfd,
				   struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_obj_tdata *tdata = elf_tdata (abfd);
  struct elf_segment_map *m;
  Elf_Internal_Phdr *p;

  /* P walks the program-header array in lockstep with the segment
     map M; both are in the same order.  */
  for (p = tdata->phdr, m = elf_seg_map (abfd); m != NULL; m = m->next, p++)
    if (m->p_type == PT_LOAD)
      {
	int i;
	for (i = m->count - 1; i >= 0; --i)
	  {
	    struct bfd_link_order *order = m->sections[i]->map_head.link_order;

	    while (order != NULL)
	      {
		if (order->type == bfd_indirect_link_order)
		  {
		    asection *is = order->u.indirect.section;
		    bfd_vma flags = elf_section_data(is)->this_hdr.sh_flags;
		    if (flags & SHF_IA_64_NORECOV)
		      {
			p->p_flags |= PF_IA_64_NORECOV;
			/* One flagged input section is enough for this
			   segment; move on to the next one.  */
			goto found;
		      }
		  }
		order = order->next;
	      }
	  }
      found:;
      }

  return TRUE;
}
/* According to the Tahoe assembler spec, all labels starting with a
   '.' are local.  */

static bfd_boolean
elfNN_ia64_is_local_label_name (bfd *abfd ATTRIBUTE_UNUSED,
				const char *name)
{
  return *name == '.';
}
/* Should we do dynamic things to this symbol? */
static bfd_boolean
elfNN_ia64_dynamic_symbol_p (struct elf_link_hash_entry *h,
struct bfd_link_info *info, int r_type)
{
bfd_boolean ignore_protected
= ((r_type & 0xf8) == 0x40 /* FPTR relocs */
|| (r_type & 0xf8) == 0x50); /* LTOFF_FPTR relocs */
return _bfd_elf_dynamic_symbol_p (h, info, ignore_protected);
}
/* Constructor for entries of the IA-64 linker hash table.  Allocates
   the entry if needed, initializes the base class via
   _bfd_elf_link_hash_newfunc, and clears the IA-64-specific
   dyn_sym_info bookkeeping fields.  Returns NULL on allocation
   failure.  */

static struct bfd_hash_entry*
elfNN_ia64_new_elf_hash_entry (struct bfd_hash_entry *entry,
			       struct bfd_hash_table *table,
			       const char *string)
{
  struct elfNN_ia64_link_hash_entry *ret;
  ret = (struct elfNN_ia64_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (!ret)
    ret = bfd_hash_allocate (table, sizeof (*ret));
  if (!ret)
    return 0;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elfNN_ia64_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));

  /* The superclass initializer can fail (e.g. when interning the
     symbol name runs out of memory); do not dereference NULL.  */
  if (!ret)
    return 0;

  ret->info = NULL;
  ret->count = 0;
  ret->sorted_count = 0;
  ret->size = 0;
  return (struct bfd_hash_entry *) ret;
}
/* Hook run when symbol XIND becomes an indirect or warning reference to
   XDIR: merge XIND's reference flags and transfer ownership of its
   dyn_sym_info array and dynamic symbol index to XDIR.  */

static void
elfNN_ia64_hash_copy_indirect (struct bfd_link_info *info,
			       struct elf_link_hash_entry *xdir,
			       struct elf_link_hash_entry *xind)
{
  struct elfNN_ia64_link_hash_entry *dir, *ind;

  dir = (struct elfNN_ia64_link_hash_entry *) xdir;
  ind = (struct elfNN_ia64_link_hash_entry *) xind;

  /* Copy down any references that we may have already seen to the
     symbol which just became indirect.  */

  dir->root.ref_dynamic |= ind->root.ref_dynamic;
  dir->root.ref_regular |= ind->root.ref_regular;
  dir->root.ref_regular_nonweak |= ind->root.ref_regular_nonweak;
  dir->root.needs_plt |= ind->root.needs_plt;

  /* Only true indirections get the data transfer below; warning
     symbols keep their own info.  */
  if (ind->root.root.type != bfd_link_hash_indirect)
    return;

  /* Copy over the got and plt data.  This would have been done
     by check_relocs.  */

  if (ind->info != NULL)
    {
      struct elfNN_ia64_dyn_sym_info *dyn_i;
      unsigned int count;

      /* DIR takes ownership of IND's array; any array DIR already had
	 is discarded.  */
      if (dir->info)
	free (dir->info);

      dir->info = ind->info;
      dir->count = ind->count;
      dir->sorted_count = ind->sorted_count;
      dir->size = ind->size;

      /* Reset IND so it no longer owns (or double-frees) the array.  */
      ind->info = NULL;
      ind->count = 0;
      ind->sorted_count = 0;
      ind->size = 0;

      /* Fix up the dyn_sym_info pointers to the global symbol.  */
      for (count = dir->count, dyn_i = dir->info;
	   count != 0;
	   count--, dyn_i++)
	dyn_i->h = &dir->root;
    }

  /* Copy over the dynindx.  */

  if (ind->root.dynindx != -1)
    {
      /* Drop the string-table reference of any index DIR already
	 held before overwriting it.  */
      if (dir->root.dynindx != -1)
	_bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
				dir->root.dynstr_index);
      dir->root.dynindx = ind->root.dynindx;
      dir->root.dynstr_index = ind->root.dynstr_index;
      ind->root.dynindx = -1;
      ind->root.dynstr_index = 0;
    }
}
/* Hide symbol XH: delegate to the generic ELF hiding logic, then drop
   any PLT entries (both short and full flavors) that were planned for
   it, since a hidden symbol is never called through the PLT.  */

static void
elfNN_ia64_hash_hide_symbol (struct bfd_link_info *info,
			     struct elf_link_hash_entry *xh,
			     bfd_boolean force_local)
{
  struct elfNN_ia64_link_hash_entry *entry;
  unsigned int i;

  entry = (struct elfNN_ia64_link_hash_entry *) xh;

  _bfd_elf_link_hash_hide_symbol (info, &entry->root, force_local);

  for (i = 0; i < entry->count; i++)
    {
      entry->info[i].want_plt = 0;
      entry->info[i].want_plt2 = 0;
    }
}
/* Compute a hash of a local hash entry. */
static hashval_t
elfNN_ia64_local_htab_hash (const void *ptr)
{
struct elfNN_ia64_local_hash_entry *entry
= (struct elfNN_ia64_local_hash_entry *) ptr;
return ELF_LOCAL_SYMBOL_HASH (entry->id, entry->r_sym);
}
/* Compare local hash entries. */
static int
elfNN_ia64_local_htab_eq (const void *ptr1, const void *ptr2)
{
struct elfNN_ia64_local_hash_entry *entry1
= (struct elfNN_ia64_local_hash_entry *) ptr1;
struct elfNN_ia64_local_hash_entry *entry2
= (struct elfNN_ia64_local_hash_entry *) ptr2;
return entry1->id == entry2->id && entry1->r_sym == entry2->r_sym;
}
/* Free the global elfNN_ia64_dyn_sym_info array.  Traversal callback;
   note that elf_link_hash_traverse hands us the entry pointer itself,
   not a slot, so XENTRY is cast directly.  Always returns TRUE so the
   traversal continues.  */

static bfd_boolean
elfNN_ia64_global_dyn_info_free (void **xentry,
				 void * unused ATTRIBUTE_UNUSED)
{
  struct elfNN_ia64_link_hash_entry *e
    = (struct elfNN_ia64_link_hash_entry *) xentry;

  if (e->info == NULL)
    return TRUE;

  free (e->info);
  e->info = NULL;
  e->count = 0;
  e->sorted_count = 0;
  e->size = 0;
  return TRUE;
}
/* Free the local elfNN_ia64_dyn_sym_info array.  htab_traverse
   callback; SLOT is a hash-table slot holding the entry pointer.
   Always returns TRUE so the traversal continues.  */

static bfd_boolean
elfNN_ia64_local_dyn_info_free (void **slot,
				void * unused ATTRIBUTE_UNUSED)
{
  struct elfNN_ia64_local_hash_entry *e
    = (struct elfNN_ia64_local_hash_entry *) *slot;

  if (e->info == NULL)
    return TRUE;

  free (e->info);
  e->info = NULL;
  e->count = 0;
  e->sorted_count = 0;
  e->size = 0;
  return TRUE;
}
/* Destroy IA-64 linker hash table: release the per-entry dyn_sym_info
   arrays (local and global), the local hash table and its objalloc
   arena, then the underlying ELF hash table itself.  */

static void
elfNN_ia64_link_hash_table_free (bfd *obfd)
{
  struct elfNN_ia64_link_hash_table *ia64_info
    = (struct elfNN_ia64_link_hash_table *) obfd->link.hash;

  /* The local table may be absent if table creation failed midway.  */
  if (ia64_info->loc_hash_table)
    {
      htab_traverse (ia64_info->loc_hash_table,
		     elfNN_ia64_local_dyn_info_free, NULL);
      htab_delete (ia64_info->loc_hash_table);
    }
  if (ia64_info->loc_hash_memory)
    objalloc_free ((struct objalloc *) ia64_info->loc_hash_memory);

  /* Free the global arrays before the generic teardown releases the
     entries that own them.  */
  elf_link_hash_traverse (&ia64_info->root,
			  elfNN_ia64_global_dyn_info_free, NULL);
  _bfd_elf_link_hash_table_free (obfd);
}
/* Create the derived linker hash table.  The IA-64 ELF port uses this
   derived hash table to keep information specific to the IA-64 ELF
   linker (without using static variables).  Returns NULL on failure,
   cleaning up any partially constructed state.  */

static struct bfd_link_hash_table *
elfNN_ia64_hash_table_create (bfd *abfd)
{
  struct elfNN_ia64_link_hash_table *ret;

  ret = bfd_zmalloc ((bfd_size_type) sizeof (*ret));
  if (!ret)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&ret->root, abfd,
				      elfNN_ia64_new_elf_hash_entry,
				      sizeof (struct elfNN_ia64_link_hash_entry),
				      IA64_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Separate hash table for local (per-object) symbols, backed by an
     objalloc arena for its entries.  */
  ret->loc_hash_table = htab_try_create (1024, elfNN_ia64_local_htab_hash,
					 elfNN_ia64_local_htab_eq, NULL);
  ret->loc_hash_memory = objalloc_create ();
  if (!ret->loc_hash_table || !ret->loc_hash_memory)
    {
      /* The free routine tolerates either piece being NULL.  */
      elfNN_ia64_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elfNN_ia64_link_hash_table_free;

  return &ret->root.root;
}
/* Traverse both local and global hash tables.  */

struct elfNN_ia64_dyn_sym_traverse_data
{
  /* Callback invoked for each dyn_sym_info record; returning FALSE
     stops the traversal of the current entry's array.  */
  bfd_boolean (*func) (struct elfNN_ia64_dyn_sym_info *, void *);
  /* Opaque pointer passed through unchanged to FUNC.  */
  void * data;
};
/* Thunk adapting elf_link_hash_traverse to the dyn_sym_info callback:
   run XDATA->func over every dyn_sym_info attached to the global
   symbol XENTRY, stopping at the first failure.  */

static bfd_boolean
elfNN_ia64_global_dyn_sym_thunk (struct bfd_hash_entry *xentry,
				 void * xdata)
{
  struct elfNN_ia64_link_hash_entry *entry
    = (struct elfNN_ia64_link_hash_entry *) xentry;
  struct elfNN_ia64_dyn_sym_traverse_data *data
    = (struct elfNN_ia64_dyn_sym_traverse_data *) xdata;
  unsigned int i;

  for (i = 0; i < entry->count; i++)
    if (!(*data->func) (&entry->info[i], data->data))
      return FALSE;

  return TRUE;
}
/* Thunk adapting htab_traverse to the dyn_sym_info callback: run
   XDATA->func over every dyn_sym_info attached to the local symbol in
   SLOT, stopping at the first failure.  */

static bfd_boolean
elfNN_ia64_local_dyn_sym_thunk (void **slot, void * xdata)
{
  struct elfNN_ia64_local_hash_entry *entry
    = (struct elfNN_ia64_local_hash_entry *) *slot;
  struct elfNN_ia64_dyn_sym_traverse_data *data
    = (struct elfNN_ia64_dyn_sym_traverse_data *) xdata;
  unsigned int i;

  for (i = 0; i < entry->count; i++)
    if (!(*data->func) (&entry->info[i], data->data))
      return FALSE;

  return TRUE;
}
/* Apply FUNC (with DATA) to every dyn_sym_info record known to the
   linker: first those hanging off global symbols in the ELF hash
   table, then those of local symbols in the auxiliary table.  */

static void
elfNN_ia64_dyn_sym_traverse (struct elfNN_ia64_link_hash_table *ia64_info,
			     bfd_boolean (*func) (struct elfNN_ia64_dyn_sym_info *, void *),
			     void * data)
{
  struct elfNN_ia64_dyn_sym_traverse_data xdata;

  xdata.func = func;
  xdata.data = data;

  elf_link_hash_traverse (&ia64_info->root,
			  elfNN_ia64_global_dyn_sym_thunk, &xdata);
  htab_traverse (ia64_info->loc_hash_table,
		 elfNN_ia64_local_dyn_sym_thunk, &xdata);
}
/* Create the IA-64 dynamic sections: the generic ELF set, plus GOT
   tweaks (small-data, 8-byte alignment), the .IA_64.pltoff section and
   its relocation section.  Returns FALSE on any failure.  */

static bfd_boolean
elfNN_ia64_create_dynamic_sections (bfd *abfd,
				    struct bfd_link_info *info)
{
  struct elfNN_ia64_link_hash_table *ia64_info;
  asection *s;

  if (! _bfd_elf_create_dynamic_sections (abfd, info))
    return FALSE;

  ia64_info = elfNN_ia64_hash_table (info);
  if (ia64_info == NULL)
    return FALSE;

  {
    /* Mark the GOT as small data so gp-relative addressing reaches
       it.  */
    flagword flags = bfd_get_section_flags (abfd, ia64_info->root.sgot);
    bfd_set_section_flags (abfd, ia64_info->root.sgot,
			   SEC_SMALL_DATA | flags);
    /* The .got section is always aligned at 8 bytes.  */
    if (! bfd_set_section_alignment (abfd, ia64_info->root.sgot, 3))
      return FALSE;
  }

  if (!get_pltoff (abfd, info, ia64_info))
    return FALSE;

  s = bfd_make_section_anyway_with_flags (abfd, ".rela.IA_64.pltoff",
					  (SEC_ALLOC | SEC_LOAD
					   | SEC_HAS_CONTENTS
					   | SEC_IN_MEMORY
					   | SEC_LINKER_CREATED
					   | SEC_READONLY));
  if (s == NULL
      || !bfd_set_section_alignment (abfd, s, LOG_SECTION_ALIGN))
    return FALSE;
  ia64_info->rel_pltoff_sec = s;

  return TRUE;
}
/* Find and/or create a hash entry for local symbol.  The key is the id
   of ABFD's first section paired with the relocation's symbol index —
   the section id appears to serve as a per-object identifier here
   (NOTE(review): assumes abfd->sections is non-NULL whenever this is
   called; confirm against callers).  Returns NULL if CREATE is FALSE
   and no entry exists, or on allocation failure.  */
static struct elfNN_ia64_local_hash_entry *
get_local_sym_hash (struct elfNN_ia64_link_hash_table *ia64_info,
		    bfd *abfd, const Elf_Internal_Rela *rel,
		    bfd_boolean create)
{
  struct elfNN_ia64_local_hash_entry e, *ret;
  asection *sec = abfd->sections;
  hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
				       ELFNN_R_SYM (rel->r_info));
  void **slot;

  e.id = sec->id;
  e.r_sym = ELFNN_R_SYM (rel->r_info);
  slot = htab_find_slot_with_hash (ia64_info->loc_hash_table, &e, h,
				   create ? INSERT : NO_INSERT);

  if (!slot)
    return NULL;

  if (*slot)
    return (struct elfNN_ia64_local_hash_entry *) *slot;

  /* Not present: allocate a zeroed entry from the arena and fill the
     slot reserved by INSERT.  */
  ret = (struct elfNN_ia64_local_hash_entry *)
	objalloc_alloc ((struct objalloc *) ia64_info->loc_hash_memory,
			sizeof (struct elfNN_ia64_local_hash_entry));
  if (ret)
    {
      memset (ret, 0, sizeof (*ret));
      ret->id = sec->id;
      ret->r_sym = ELFNN_R_SYM (rel->r_info);
      *slot = ret;
    }
  return ret;
}
/* qsort/bsearch comparison function ordering elfNN_ia64_dyn_sym_info
   records by ascending addend.  */

static int
addend_compare (const void *xp, const void *yp)
{
  const struct elfNN_ia64_dyn_sym_info *x
    = (const struct elfNN_ia64_dyn_sym_info *) xp;
  const struct elfNN_ia64_dyn_sym_info *y
    = (const struct elfNN_ia64_dyn_sym_info *) yp;

  if (x->addend < y->addend)
    return -1;
  if (x->addend > y->addend)
    return 1;
  return 0;
}
/* Sort elfNN_ia64_dyn_sym_info array and remove duplicates.  Entries
   with equal addends are collapsed to one, and while collapsing any
   valid (!= -1) got_offset among the duplicates is propagated to the
   surviving entry.  Returns the new element count.  NOTE(review):
   assumes COUNT >= 1 — info[0] is read unconditionally.  */

static unsigned int
sort_dyn_sym_info (struct elfNN_ia64_dyn_sym_info *info,
		   unsigned int count)
{
  bfd_vma curr, prev, got_offset;
  unsigned int i, kept, dupes, diff, dest, src, len;

  qsort (info, count, sizeof (*info), addend_compare);

  /* Find the first duplicate.  */
  prev = info [0].addend;
  got_offset = info [0].got_offset;
  for (i = 1; i < count; i++)
    {
      curr = info [i].addend;
      if (curr == prev)
	{
	  /* For duplicates, make sure that GOT_OFFSET is valid.  */
	  if (got_offset == (bfd_vma) -1)
	    got_offset = info [i].got_offset;
	  break;
	}
      got_offset = info [i].got_offset;
      prev = curr;
    }

  /* We may move a block of elements to here.  */
  dest = i++;

  /* Remove duplicates.  */
  if (i < count)
    {
      while (i < count)
	{
	  /* For duplicates, make sure that the kept one has a valid
	     got_offset.  */
	  kept = dest - 1;
	  if (got_offset != (bfd_vma) -1)
	    info [kept].got_offset = got_offset;

	  curr = info [i].addend;
	  got_offset = info [i].got_offset;

	  /* Move a block of elements whose first one is different from
	     the previous.  */
	  if (curr == prev)
	    {
	      /* Skip the whole run of duplicates of CURR, harvesting a
		 valid got_offset along the way.  */
	      for (src = i + 1; src < count; src++)
		{
		  if (info [src].addend != curr)
		    break;
		  /* For duplicates, make sure that GOT_OFFSET is
		     valid.  */
		  if (got_offset == (bfd_vma) -1)
		    got_offset = info [src].got_offset;
		}

	      /* Make sure that the kept one has a valid got_offset.  */
	      if (got_offset != (bfd_vma) -1)
		info [kept].got_offset = got_offset;
	    }
	  else
	    src = i;

	  if (src >= count)
	    break;

	  /* Find the next duplicate.  SRC will be kept.  */
	  prev = info [src].addend;
	  got_offset = info [src].got_offset;
	  for (dupes = src + 1; dupes < count; dupes ++)
	    {
	      curr = info [dupes].addend;
	      if (curr == prev)
		{
		  /* Make sure that got_offset is valid.  */
		  if (got_offset == (bfd_vma) -1)
		    got_offset = info [dupes].got_offset;

		  /* For duplicates, make sure that the kept one has
		     a valid got_offset.  */
		  if (got_offset != (bfd_vma) -1)
		    info [dupes - 1].got_offset = got_offset;
		  break;
		}
	      got_offset = info [dupes].got_offset;
	      prev = curr;
	    }

	  /* How much to move.  */
	  len = dupes - src;
	  i = dupes + 1;

	  if (len == 1 && dupes < count)
	    {
	      /* If we only move 1 element, we combine it with the next
		 one.  There must be at least a duplicate.  Find the
		 next different one.  */
	      for (diff = dupes + 1, src++; diff < count; diff++, src++)
		{
		  if (info [diff].addend != curr)
		    break;
		  /* Make sure that got_offset is valid.  */
		  if (got_offset == (bfd_vma) -1)
		    got_offset = info [diff].got_offset;
		}

	      /* Make sure that the last duplicated one has a valid
		 offset.  */
	      BFD_ASSERT (curr == prev);
	      if (got_offset != (bfd_vma) -1)
		info [diff - 1].got_offset = got_offset;

	      if (diff < count)
		{
		  /* Find the next duplicate.  Track the current valid
		     offset.  */
		  prev = info [diff].addend;
		  got_offset = info [diff].got_offset;
		  for (dupes = diff + 1; dupes < count; dupes ++)
		    {
		      curr = info [dupes].addend;
		      if (curr == prev)
			{
			  /* For duplicates, make sure that GOT_OFFSET
			     is valid.  */
			  if (got_offset == (bfd_vma) -1)
			    got_offset = info [dupes].got_offset;
			  break;
			}
		      got_offset = info [dupes].got_offset;
		      prev = curr;
		      diff++;
		    }

		  len = diff - src + 1;
		  i = diff + 1;
		}
	    }

	  /* Compact: slide the unique run down over the duplicates.  */
	  memmove (&info [dest], &info [src], len * sizeof (*info));

	  dest += len;
	}

      count = dest;
    }
  else
    {
      /* When we get here, either there is no duplicate at all or
	 the only duplicate is the last element.  */
      if (dest < count)
	{
	  /* If the last element is a duplicate, make sure that the
	     kept one has a valid got_offset.  We also update count.  */
	  if (got_offset != (bfd_vma) -1)
	    info [dest - 1].got_offset = got_offset;
	  count = dest;
	}
    }

  return count;
}
/* Find and/or create a descriptor for dynamic symbol info.  This will
   vary based on global or local symbol, and the addend to the reloc.

   We don't sort when inserting.  Also, we sort and eliminate
   duplicates if there is an unsorted section.  Typically, this will
   only happen once, because we do all insertions before lookups.  We
   then use bsearch to do a lookup.  This also allows lookups to be
   fast.  So we have fast insertion (O(log N) due to duplicate check),
   fast lookup (O(log N)) and one sort (O(N log N) expected time).
   Previously, all lookups were O(N) because of the use of the linked
   list and also all insertions were O(N) because of the check for
   duplicates.  There are some complications here because the array
   size grows occasionally, which may add an O(N) factor, but this
   should be rare.  Also, we free the excess array allocation, which
   requires a copy which is O(N), but this only happens once.  */

static struct elfNN_ia64_dyn_sym_info *
get_dyn_sym_info (struct elfNN_ia64_link_hash_table *ia64_info,
		  struct elf_link_hash_entry *h, bfd *abfd,
		  const Elf_Internal_Rela *rel, bfd_boolean create)
{
  struct elfNN_ia64_dyn_sym_info **info_p, *info, *dyn_i, key;
  unsigned int *count_p, *sorted_count_p, *size_p;
  unsigned int count, sorted_count, size;
  bfd_vma addend = rel ? rel->r_addend : 0;
  bfd_size_type amt;

  /* Locate the array bookkeeping (pointer, counts, capacity) on
     either the global or the local hash entry.  */
  if (h)
    {
      struct elfNN_ia64_link_hash_entry *global_h;

      global_h = (struct elfNN_ia64_link_hash_entry *) h;
      info_p = &global_h->info;
      count_p = &global_h->count;
      sorted_count_p = &global_h->sorted_count;
      size_p = &global_h->size;
    }
  else
    {
      struct elfNN_ia64_local_hash_entry *loc_h;

      loc_h = get_local_sym_hash (ia64_info, abfd, rel, create);
      if (!loc_h)
	{
	  BFD_ASSERT (!create);
	  return NULL;
	}

      info_p = &loc_h->info;
      count_p = &loc_h->count;
      sorted_count_p = &loc_h->sorted_count;
      size_p = &loc_h->size;
    }

  count = *count_p;
  sorted_count = *sorted_count_p;
  size = *size_p;
  info = *info_p;
  if (create)
    {
      /* When we create the array, we don't check for duplicates,
	 except in the previously sorted section if one exists, and
	 against the last inserted entry.  This allows insertions to
	 be fast.  */
      if (info)
	{
	  if (sorted_count)
	    {
	      /* Try bsearch first on the sorted section.  */
	      key.addend = addend;
	      dyn_i = bsearch (&key, info, sorted_count,
			       sizeof (*info), addend_compare);

	      if (dyn_i)
		{
		  return dyn_i;
		}
	    }

	  /* Do a quick check for the last inserted entry.  */
	  /* NOTE(review): relies on COUNT >= 1 whenever INFO is
	     non-NULL; appears to hold since INFO is only allocated on
	     the append path below.  */
	  dyn_i = info + count - 1;
	  if (dyn_i->addend == addend)
	    {
	      return dyn_i;
	    }
	}

      if (size == 0)
	{
	  /* It is the very first element. We create the array of size
	     1.  */
	  size = 1;
	  amt = size * sizeof (*info);
	  info = bfd_malloc (amt);
	}
      else if (size <= count)
	{
	  /* We double the array size every time when we reach the
	     size limit.  */
	  size += size;
	  amt = size * sizeof (*info);
	  info = bfd_realloc (info, amt);
	}
      else
	goto has_space;

      if (info == NULL)
	return NULL;
      *size_p = size;
      *info_p = info;

has_space:
      /* Append the new one to the array.  */
      dyn_i = info + count;
      memset (dyn_i, 0, sizeof (*dyn_i));
      dyn_i->got_offset = (bfd_vma) -1;
      dyn_i->addend = addend;

      /* We increment count only since the new ones are unsorted and
	 may have duplicate.  */
      (*count_p)++;
    }
  else
    {
      /* It is a lookup without insertion.  Sort array if part of the
	 array isn't sorted.  */
      if (count != sorted_count)
	{
	  count = sort_dyn_sym_info (info, count);
	  *count_p = count;
	  *sorted_count_p = count;
	}

      /* Free unused memory.  */
      if (size != count)
	{
	  amt = count * sizeof (*info);
	  info = bfd_malloc (amt);
	  if (info != NULL)
	    {
	      /* Shrink to fit; on malloc failure we simply keep the
		 oversized array.  */
	      memcpy (info, *info_p, amt);
	      free (*info_p);
	      *size_p = count;
	      *info_p = info;
	    }
	}

      key.addend = addend;
      dyn_i = bsearch (&key, info, count,
		       sizeof (*info), addend_compare);
    }
  return dyn_i;
}
/* Return the .got section, creating it (and setting the dynobj) on
   first use.  The section is marked small-data and aligned to 8
   bytes.  Returns NULL on failure.  */

static asection *
get_got (bfd *abfd, struct bfd_link_info *info,
	 struct elfNN_ia64_link_hash_table *ia64_info)
{
  asection *got;
  bfd *dynobj;

  got = ia64_info->root.sgot;
  if (!got)
    {
      flagword flags;

      dynobj = ia64_info->root.dynobj;
      if (!dynobj)
	ia64_info->root.dynobj = dynobj = abfd;
      if (!_bfd_elf_create_got_section (dynobj, info))
	return NULL;

      got = ia64_info->root.sgot;

      /* The .got section is always aligned at 8 bytes.  */
      if (!bfd_set_section_alignment (abfd, got, 3))
	return NULL;

      flags = bfd_get_section_flags (abfd, got);
      if (! bfd_set_section_flags (abfd, got, SEC_SMALL_DATA | flags))
	return NULL;
    }

  return got;
}
/* Create function descriptor section (.opd).  This section is called .opd
   because it contains "official procedure descriptors".  The "official"
   refers to the fact that these descriptors are used when taking the address
   of a procedure, thus ensuring a unique address for each procedure.
   For a PIE link the section must be writable (the dynamic linker
   relocates it) and gets a companion .rela.opd section.  */

static asection *
get_fptr (bfd *abfd, struct bfd_link_info *info,
	  struct elfNN_ia64_link_hash_table *ia64_info)
{
  asection *fptr;
  bfd *dynobj;

  fptr = ia64_info->fptr_sec;
  if (!fptr)
    {
      dynobj = ia64_info->root.dynobj;
      if (!dynobj)
	ia64_info->root.dynobj = dynobj = abfd;

      fptr = bfd_make_section_anyway_with_flags (dynobj, ".opd",
						 (SEC_ALLOC
						  | SEC_LOAD
						  | SEC_HAS_CONTENTS
						  | SEC_IN_MEMORY
						  | (bfd_link_pie (info)
						     ? 0 : SEC_READONLY)
						  | SEC_LINKER_CREATED));
      if (!fptr
	  || !bfd_set_section_alignment (abfd, fptr, 4))
	{
	  BFD_ASSERT (0);
	  return NULL;
	}

      ia64_info->fptr_sec = fptr;

      if (bfd_link_pie (info))
	{
	  asection *fptr_rel;

	  fptr_rel = bfd_make_section_anyway_with_flags (dynobj, ".rela.opd",
							 (SEC_ALLOC | SEC_LOAD
							  | SEC_HAS_CONTENTS
							  | SEC_IN_MEMORY
							  | SEC_LINKER_CREATED
							  | SEC_READONLY));
	  if (fptr_rel == NULL
	      || !bfd_set_section_alignment (abfd, fptr_rel,
					     LOG_SECTION_ALIGN))
	    {
	      BFD_ASSERT (0);
	      return NULL;
	    }

	  ia64_info->rel_fptr_sec = fptr_rel;
	}
    }

  return fptr;
}
/* Return the .IA_64.pltoff section, creating it (16-byte aligned,
   small-data so it is reachable gp-relative) on first use.  Returns
   NULL on failure.  */

static asection *
get_pltoff (bfd *abfd, struct bfd_link_info *info ATTRIBUTE_UNUSED,
	    struct elfNN_ia64_link_hash_table *ia64_info)
{
  asection *pltoff;
  bfd *dynobj;

  pltoff = ia64_info->pltoff_sec;
  if (!pltoff)
    {
      dynobj = ia64_info->root.dynobj;
      if (!dynobj)
	ia64_info->root.dynobj = dynobj = abfd;

      pltoff = bfd_make_section_anyway_with_flags (dynobj,
						   ELF_STRING_ia64_pltoff,
						   (SEC_ALLOC
						    | SEC_LOAD
						    | SEC_HAS_CONTENTS
						    | SEC_IN_MEMORY
						    | SEC_SMALL_DATA
						    | SEC_LINKER_CREATED));
      if (!pltoff
	  || !bfd_set_section_alignment (abfd, pltoff, 4))
	{
	  BFD_ASSERT (0);
	  return NULL;
	}

      ia64_info->pltoff_sec = pltoff;
    }

  return pltoff;
}
/* Return the dynamic relocation section corresponding to SEC (e.g.
   .rela.text for .text), looked up by the name recorded in SEC's
   relocation section header.  If CREATE, make it (read-only, properly
   aligned) when it does not exist yet.  Returns NULL on failure or
   when absent and !CREATE.  */

static asection *
get_reloc_section (bfd *abfd,
		   struct elfNN_ia64_link_hash_table *ia64_info,
		   asection *sec, bfd_boolean create)
{
  const char *srel_name;
  asection *srel;
  bfd *dynobj;

  srel_name = (bfd_elf_string_from_elf_section
	       (abfd, elf_elfheader(abfd)->e_shstrndx,
		_bfd_elf_single_rel_hdr (sec)->sh_name));
  if (srel_name == NULL)
    return NULL;

  dynobj = ia64_info->root.dynobj;
  if (!dynobj)
    ia64_info->root.dynobj = dynobj = abfd;

  srel = bfd_get_linker_section (dynobj, srel_name);
  if (srel == NULL && create)
    {
      srel = bfd_make_section_anyway_with_flags (dynobj, srel_name,
						 (SEC_ALLOC | SEC_LOAD
						  | SEC_HAS_CONTENTS
						  | SEC_IN_MEMORY
						  | SEC_LINKER_CREATED
						  | SEC_READONLY));
      if (srel == NULL
	  || !bfd_set_section_alignment (dynobj, srel,
					 LOG_SECTION_ALIGN))
	return NULL;
    }

  return srel;
}
/* Record one more dynamic relocation of TYPE against DYN_I going into
   section SREL.  A per-(section, type) counter record is kept on the
   dyn_sym_info; allocate it on first use.  RELTEXT notes whether the
   relocation applies to a read-only section.  Returns FALSE on
   allocation failure.  */

static bfd_boolean
count_dyn_reloc (bfd *abfd, struct elfNN_ia64_dyn_sym_info *dyn_i,
		 asection *srel, int type, bfd_boolean reltext)
{
  struct elfNN_ia64_dyn_reloc_entry *rent;

  /* Look for an existing counter for this (SREL, TYPE) pair.  */
  rent = dyn_i->reloc_entries;
  while (rent != NULL && !(rent->srel == srel && rent->type == type))
    rent = rent->next;

  if (rent == NULL)
    {
      /* First relocation of this kind: push a fresh record onto the
	 list.  */
      rent = ((struct elfNN_ia64_dyn_reloc_entry *)
	      bfd_alloc (abfd, (bfd_size_type) sizeof (*rent)));
      if (rent == NULL)
	return FALSE;

      rent->next = dyn_i->reloc_entries;
      rent->srel = srel;
      rent->type = type;
      rent->count = 0;
      dyn_i->reloc_entries = rent;
    }

  rent->reltext = reltext;
  rent->count++;

  return TRUE;
}
/* Look through the relocs for SEC during the first phase of the link
   and record what linker-generated material each (symbol, addend) pair
   will need: GOT entries, function descriptors, PLT entries, @pltoff
   slots and dynamic relocations.  The relocs are walked twice: the
   first pass only inserts dyn_sym_info records (fast, unsorted
   insertion in get_dyn_sym_info), the second pass looks the records up
   after sorting and sets the want_* flags and reloc counts.  The two
   switch statements are therefore deliberately near-identical.  */

static bfd_boolean
elfNN_ia64_check_relocs (bfd *abfd, struct bfd_link_info *info,
			 asection *sec,
			 const Elf_Internal_Rela *relocs)
{
  struct elfNN_ia64_link_hash_table *ia64_info;
  const Elf_Internal_Rela *relend;
  Elf_Internal_Shdr *symtab_hdr;
  const Elf_Internal_Rela *rel;
  asection *got, *fptr, *srel, *pltoff;
  /* Bit-set of linker artifacts a relocation demands.  */
  enum {
    NEED_GOT = 1,
    NEED_GOTX = 2,
    NEED_FPTR = 4,
    NEED_PLTOFF = 8,
    NEED_MIN_PLT = 16,
    NEED_FULL_PLT = 32,
    NEED_DYNREL = 64,
    NEED_LTOFF_FPTR = 128,
    NEED_TPREL = 256,
    NEED_DTPMOD = 512,
    NEED_DTPREL = 1024
  };
  int need_entry;
  struct elf_link_hash_entry *h;
  unsigned long r_symndx;
  bfd_boolean maybe_dynamic;

  if (bfd_link_relocatable (info))
    return TRUE;

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  ia64_info = elfNN_ia64_hash_table (info);
  if (ia64_info == NULL)
    return FALSE;

  got = fptr = srel = pltoff = NULL;
  relend = relocs + sec->reloc_count;

  /* We scan relocations first to create dynamic relocation arrays.  We
     modified get_dyn_sym_info to allow fast insertion and support fast
     lookup in the next loop.  */
  for (rel = relocs; rel < relend; ++rel)
    {
      r_symndx = ELFNN_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  long indx = r_symndx - symtab_hdr->sh_info;
	  h = elf_sym_hashes (abfd)[indx];
	  /* Resolve indirect/warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      else
	h = NULL;

      /* We can only get preliminary data on whether a symbol is
	 locally or externally defined, as not all of the input files
	 have yet been processed.  Do something with what we know, as
	 this may help reduce memory usage and processing time later.  */
      maybe_dynamic = (h && ((!bfd_link_executable (info)
			      && (!SYMBOLIC_BIND (info, h)
				  || info->unresolved_syms_in_shared_libs == RM_IGNORE))
			     || !h->def_regular
			     || h->root.type == bfd_link_hash_defweak));

      need_entry = 0;
      switch (ELFNN_R_TYPE (rel->r_info))
	{
	case R_IA64_TPREL64MSB:
	case R_IA64_TPREL64LSB:
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  break;

	case R_IA64_LTOFF_TPREL22:
	  need_entry = NEED_TPREL;
	  if (bfd_link_pic (info))
	    info->flags |= DF_STATIC_TLS;
	  break;

	case R_IA64_DTPREL32MSB:
	case R_IA64_DTPREL32LSB:
	case R_IA64_DTPREL64MSB:
	case R_IA64_DTPREL64LSB:
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  break;

	case R_IA64_LTOFF_DTPREL22:
	  need_entry = NEED_DTPREL;
	  break;

	case R_IA64_DTPMOD64MSB:
	case R_IA64_DTPMOD64LSB:
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  break;

	case R_IA64_LTOFF_DTPMOD22:
	  need_entry = NEED_DTPMOD;
	  break;

	case R_IA64_LTOFF_FPTR22:
	case R_IA64_LTOFF_FPTR64I:
	case R_IA64_LTOFF_FPTR32MSB:
	case R_IA64_LTOFF_FPTR32LSB:
	case R_IA64_LTOFF_FPTR64MSB:
	case R_IA64_LTOFF_FPTR64LSB:
	  need_entry = NEED_FPTR | NEED_GOT | NEED_LTOFF_FPTR;
	  break;

	case R_IA64_FPTR64I:
	case R_IA64_FPTR32MSB:
	case R_IA64_FPTR32LSB:
	case R_IA64_FPTR64MSB:
	case R_IA64_FPTR64LSB:
	  if (bfd_link_pic (info) || h)
	    need_entry = NEED_FPTR | NEED_DYNREL;
	  else
	    need_entry = NEED_FPTR;
	  break;

	case R_IA64_LTOFF22:
	case R_IA64_LTOFF64I:
	  need_entry = NEED_GOT;
	  break;

	case R_IA64_LTOFF22X:
	  need_entry = NEED_GOTX;
	  break;

	case R_IA64_PLTOFF22:
	case R_IA64_PLTOFF64I:
	case R_IA64_PLTOFF64MSB:
	case R_IA64_PLTOFF64LSB:
	  need_entry = NEED_PLTOFF;
	  if (h)
	    {
	      if (maybe_dynamic)
		need_entry |= NEED_MIN_PLT;
	    }
	  else
	    {
	      (*info->callbacks->warning)
		(info, _("@pltoff reloc against local symbol"), 0,
		 abfd, 0, (bfd_vma) 0);
	    }
	  break;

	case R_IA64_PCREL21B:
	case R_IA64_PCREL60B:
	  /* Depending on where this symbol is defined, we may or may not
	     need a full plt entry.  Only skip if we know we'll not need
	     the entry -- static or symbolic, and the symbol definition
	     has already been seen.  */
	  if (maybe_dynamic && rel->r_addend == 0)
	    need_entry = NEED_FULL_PLT;
	  break;

	case R_IA64_IMM14:
	case R_IA64_IMM22:
	case R_IA64_IMM64:
	case R_IA64_DIR32MSB:
	case R_IA64_DIR32LSB:
	case R_IA64_DIR64MSB:
	case R_IA64_DIR64LSB:
	  /* Shared objects will always need at least a REL relocation.  */
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  break;

	case R_IA64_IPLTMSB:
	case R_IA64_IPLTLSB:
	  /* Shared objects will always need at least a REL relocation.  */
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  break;

	case R_IA64_PCREL22:
	case R_IA64_PCREL64I:
	case R_IA64_PCREL32MSB:
	case R_IA64_PCREL32LSB:
	case R_IA64_PCREL64MSB:
	case R_IA64_PCREL64LSB:
	  if (maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  break;
	}

      if (!need_entry)
	continue;

      if ((need_entry & NEED_FPTR) != 0
	  && rel->r_addend)
	{
	  (*info->callbacks->warning)
	    (info, _("non-zero addend in @fptr reloc"), 0,
	     abfd, 0, (bfd_vma) 0);
	}

      if (get_dyn_sym_info (ia64_info, h, abfd, rel, TRUE) == NULL)
	return FALSE;
    }

  /* Now, we only do lookup without insertion, which is very fast
     with the modified get_dyn_sym_info.  */
  for (rel = relocs; rel < relend; ++rel)
    {
      struct elfNN_ia64_dyn_sym_info *dyn_i;
      int dynrel_type = R_IA64_NONE;

      r_symndx = ELFNN_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* We're dealing with a global symbol -- find its hash entry
	     and mark it as being referenced.  */
	  long indx = r_symndx - symtab_hdr->sh_info;
	  h = elf_sym_hashes (abfd)[indx];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  /* PR15323, ref flags aren't set for references in the same
	     object.  */
	  h->root.non_ir_ref = 1;
	  h->ref_regular = 1;
	}
      else
	h = NULL;

      /* We can only get preliminary data on whether a symbol is
	 locally or externally defined, as not all of the input files
	 have yet been processed.  Do something with what we know, as
	 this may help reduce memory usage and processing time later.  */
      maybe_dynamic = (h && ((!bfd_link_executable (info)
			      && (!SYMBOLIC_BIND (info, h)
				  || info->unresolved_syms_in_shared_libs == RM_IGNORE))
			     || !h->def_regular
			     || h->root.type == bfd_link_hash_defweak));

      need_entry = 0;
      switch (ELFNN_R_TYPE (rel->r_info))
	{
	case R_IA64_TPREL64MSB:
	case R_IA64_TPREL64LSB:
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  dynrel_type = R_IA64_TPREL64LSB;
	  if (bfd_link_pic (info))
	    info->flags |= DF_STATIC_TLS;
	  break;

	case R_IA64_LTOFF_TPREL22:
	  need_entry = NEED_TPREL;
	  if (bfd_link_pic (info))
	    info->flags |= DF_STATIC_TLS;
	  break;

	case R_IA64_DTPREL32MSB:
	case R_IA64_DTPREL32LSB:
	case R_IA64_DTPREL64MSB:
	case R_IA64_DTPREL64LSB:
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  dynrel_type = R_IA64_DTPRELNNLSB;
	  break;

	case R_IA64_LTOFF_DTPREL22:
	  need_entry = NEED_DTPREL;
	  break;

	case R_IA64_DTPMOD64MSB:
	case R_IA64_DTPMOD64LSB:
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  dynrel_type = R_IA64_DTPMOD64LSB;
	  break;

	case R_IA64_LTOFF_DTPMOD22:
	  need_entry = NEED_DTPMOD;
	  break;

	case R_IA64_LTOFF_FPTR22:
	case R_IA64_LTOFF_FPTR64I:
	case R_IA64_LTOFF_FPTR32MSB:
	case R_IA64_LTOFF_FPTR32LSB:
	case R_IA64_LTOFF_FPTR64MSB:
	case R_IA64_LTOFF_FPTR64LSB:
	  need_entry = NEED_FPTR | NEED_GOT | NEED_LTOFF_FPTR;
	  break;

	case R_IA64_FPTR64I:
	case R_IA64_FPTR32MSB:
	case R_IA64_FPTR32LSB:
	case R_IA64_FPTR64MSB:
	case R_IA64_FPTR64LSB:
	  if (bfd_link_pic (info) || h)
	    need_entry = NEED_FPTR | NEED_DYNREL;
	  else
	    need_entry = NEED_FPTR;
	  dynrel_type = R_IA64_FPTRNNLSB;
	  break;

	case R_IA64_LTOFF22:
	case R_IA64_LTOFF64I:
	  need_entry = NEED_GOT;
	  break;

	case R_IA64_LTOFF22X:
	  need_entry = NEED_GOTX;
	  break;

	case R_IA64_PLTOFF22:
	case R_IA64_PLTOFF64I:
	case R_IA64_PLTOFF64MSB:
	case R_IA64_PLTOFF64LSB:
	  need_entry = NEED_PLTOFF;
	  if (h)
	    {
	      if (maybe_dynamic)
		need_entry |= NEED_MIN_PLT;
	    }
	  break;

	case R_IA64_PCREL21B:
	case R_IA64_PCREL60B:
	  /* Depending on where this symbol is defined, we may or may not
	     need a full plt entry.  Only skip if we know we'll not need
	     the entry -- static or symbolic, and the symbol definition
	     has already been seen.  */
	  if (maybe_dynamic && rel->r_addend == 0)
	    need_entry = NEED_FULL_PLT;
	  break;

	case R_IA64_IMM14:
	case R_IA64_IMM22:
	case R_IA64_IMM64:
	case R_IA64_DIR32MSB:
	case R_IA64_DIR32LSB:
	case R_IA64_DIR64MSB:
	case R_IA64_DIR64LSB:
	  /* Shared objects will always need at least a REL relocation.  */
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  dynrel_type = R_IA64_DIRNNLSB;
	  break;

	case R_IA64_IPLTMSB:
	case R_IA64_IPLTLSB:
	  /* Shared objects will always need at least a REL relocation.  */
	  if (bfd_link_pic (info) || maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  dynrel_type = R_IA64_IPLTLSB;
	  break;

	case R_IA64_PCREL22:
	case R_IA64_PCREL64I:
	case R_IA64_PCREL32MSB:
	case R_IA64_PCREL32LSB:
	case R_IA64_PCREL64MSB:
	case R_IA64_PCREL64LSB:
	  if (maybe_dynamic)
	    need_entry = NEED_DYNREL;
	  dynrel_type = R_IA64_PCRELNNLSB;
	  break;
	}

      if (!need_entry)
	continue;

      /* The record was inserted by the first pass, so the lookup
	 cannot fail here.  */
      dyn_i = get_dyn_sym_info (ia64_info, h, abfd, rel, FALSE);

      /* Record whether or not this is a local symbol.  */
      dyn_i->h = h;

      /* Create what's needed.  */
      if (need_entry & (NEED_GOT | NEED_GOTX | NEED_TPREL
			| NEED_DTPMOD | NEED_DTPREL))
	{
	  if (!got)
	    {
	      got = get_got (abfd, info, ia64_info);
	      if (!got)
		return FALSE;
	    }
	  if (need_entry & NEED_GOT)
	    dyn_i->want_got = 1;
	  if (need_entry & NEED_GOTX)
	    dyn_i->want_gotx = 1;
	  if (need_entry & NEED_TPREL)
	    dyn_i->want_tprel = 1;
	  if (need_entry & NEED_DTPMOD)
	    dyn_i->want_dtpmod = 1;
	  if (need_entry & NEED_DTPREL)
	    dyn_i->want_dtprel = 1;
	}
      if (need_entry & NEED_FPTR)
	{
	  if (!fptr)
	    {
	      fptr = get_fptr (abfd, info, ia64_info);
	      if (!fptr)
		return FALSE;
	    }

	  /* FPTRs for shared libraries are allocated by the dynamic
	     linker.  Make sure this local symbol will appear in the
	     dynamic symbol table.  */
	  if (!h && bfd_link_pic (info))
	    {
	      if (! (bfd_elf_link_record_local_dynamic_symbol
		     (info, abfd, (long) r_symndx)))
		return FALSE;
	    }

	  dyn_i->want_fptr = 1;
	}
      if (need_entry & NEED_LTOFF_FPTR)
	dyn_i->want_ltoff_fptr = 1;
      if (need_entry & (NEED_MIN_PLT | NEED_FULL_PLT))
	{
	  if (!ia64_info->root.dynobj)
	    ia64_info->root.dynobj = abfd;
	  h->needs_plt = 1;
	  dyn_i->want_plt = 1;
	}
      if (need_entry & NEED_FULL_PLT)
	dyn_i->want_plt2 = 1;
      if (need_entry & NEED_PLTOFF)
	{
	  /* This is needed here, in case @pltoff is used in a non-shared
	     link.  */
	  if (!pltoff)
	    {
	      pltoff = get_pltoff (abfd, info, ia64_info);
	      if (!pltoff)
		return FALSE;
	    }

	  dyn_i->want_pltoff = 1;
	}
      if ((need_entry & NEED_DYNREL) && (sec->flags & SEC_ALLOC))
	{
	  if (!srel)
	    {
	      srel = get_reloc_section (abfd, ia64_info, sec, TRUE);
	      if (!srel)
		return FALSE;
	    }
	  if (!count_dyn_reloc (abfd, dyn_i, srel, dynrel_type,
				(sec->flags & SEC_READONLY) != 0))
	    return FALSE;
	}
    }

  return TRUE;
}
/* For cleanliness, and potentially faster dynamic loading, allocate
   external GOT entries first.  Traversal callback: X->ofs is the
   running GOT allocation cursor, advanced by 8 per slot.  */

static bfd_boolean
allocate_global_data_got (struct elfNN_ia64_dyn_sym_info *dyn_i,
			  void * data)
{
  struct elfNN_ia64_allocate_data *x = (struct elfNN_ia64_allocate_data *)data;

  /* Plain GOT entry for a dynamic symbol; FPTR-related GOT entries are
     handled by allocate_global_fptr_got instead.  */
  if ((dyn_i->want_got || dyn_i->want_gotx)
      && ! dyn_i->want_fptr
      && elfNN_ia64_dynamic_symbol_p (dyn_i->h, x->info, 0))
     {
       dyn_i->got_offset = x->ofs;
       x->ofs += 8;
     }
  if (dyn_i->want_tprel)
    {
      dyn_i->tprel_offset = x->ofs;
      x->ofs += 8;
    }
  if (dyn_i->want_dtpmod)
    {
      if (elfNN_ia64_dynamic_symbol_p (dyn_i->h, x->info, 0))
	{
	  dyn_i->dtpmod_offset = x->ofs;
	  x->ofs += 8;
	}
      else
	{
	  struct elfNN_ia64_link_hash_table *ia64_info;

	  /* Symbols resolving locally all share the module's own
	     DTPMOD entry; allocate it once and reuse the offset.  */
	  ia64_info = elfNN_ia64_hash_table (x->info);
	  if (ia64_info == NULL)
	    return FALSE;

	  if (ia64_info->self_dtpmod_offset == (bfd_vma) -1)
	    {
	      ia64_info->self_dtpmod_offset = x->ofs;
	      x->ofs += 8;
	    }
	  dyn_i->dtpmod_offset = ia64_info->self_dtpmod_offset;
	}
    }
  if (dyn_i->want_dtprel)
    {
      dyn_i->dtprel_offset = x->ofs;
      x->ofs += 8;
    }
  return TRUE;
}
/* Next, allocate all the GOT entries used by LTOFF_FPTR relocs.  */

static bfd_boolean
allocate_global_fptr_got (struct elfNN_ia64_dyn_sym_info *dyn_i,
			  void * data)
{
  struct elfNN_ia64_allocate_data *x = (struct elfNN_ia64_allocate_data *) data;

  /* Only entries that want both a GOT slot and a function descriptor
     are handled in this pass.  */
  if (!dyn_i->want_got || !dyn_i->want_fptr)
    return TRUE;

  if (!elfNN_ia64_dynamic_symbol_p (dyn_i->h, x->info, R_IA64_FPTRNNLSB))
    return TRUE;

  dyn_i->got_offset = x->ofs;
  x->ofs += 8;
  return TRUE;
}
/* Lastly, allocate all the GOT entries for local data.  */

static bfd_boolean
allocate_local_got (struct elfNN_ia64_dyn_sym_info *dyn_i,
		    void * data)
{
  struct elfNN_ia64_allocate_data *x = (struct elfNN_ia64_allocate_data *) data;
  bfd_boolean wants_got = dyn_i->want_got || dyn_i->want_gotx;

  /* Only non-dynamic (locally resolved) symbols remain; the global
     passes above have already claimed their slots.  */
  if (wants_got
      && !elfNN_ia64_dynamic_symbol_p (dyn_i->h, x->info, 0))
    {
      dyn_i->got_offset = x->ofs;
      x->ofs += 8;
    }
  return TRUE;
}
/* Search for the index of a global symbol in its defining object file.  */

static long
global_sym_index (struct elf_link_hash_entry *h)
{
  bfd *owner;
  struct elf_link_hash_entry **hashes;
  long i;

  BFD_ASSERT (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak);

  owner = h->root.u.def.section->owner;
  hashes = elf_sym_hashes (owner);

  /* Linear scan; the symbol is expected to be present in its owner's
     hash array, so no bound check is performed.  */
  for (i = 0; hashes[i] != h; i++)
    ;

  /* Global symbols are numbered after the sh_info local symbols.  */
  return i + elf_tdata (owner)->symtab_hdr.sh_info;
}
/* Allocate function descriptors.  We can do these for every function
   in a main executable that is not exported.

   DYN_I is the per-symbol dynamic info entry; DATA is the shared
   elfNN_ia64_allocate_data cursor.  Returns FALSE only when recording
   a local dynamic symbol fails.  */

static bfd_boolean
allocate_fptr (struct elfNN_ia64_dyn_sym_info *dyn_i, void * data)
{
  struct elfNN_ia64_allocate_data *x = (struct elfNN_ia64_allocate_data *)data;

  if (dyn_i->want_fptr)
    {
      struct elf_link_hash_entry *h = dyn_i->h;

      /* Follow indirect and warning links down to the real symbol.  */
      if (h)
	while (h->root.type == bfd_link_hash_indirect
	       || h->root.type == bfd_link_hash_warning)
	  h = (struct elf_link_hash_entry *) h->root.u.i.link;

      /* Non-executable link, and the symbol is either local or
	 potentially preemptible: leave descriptor creation to the
	 dynamic linker rather than allocating one here.  */
      if (!bfd_link_executable (x->info)
	  && (!h
	      || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	      || (h->root.type != bfd_link_hash_undefweak
		  && h->root.type != bfd_link_hash_undefined)))
	{
	  /* Make sure a symbol without a dynindx still appears in the
	     dynamic symbol table, so the dynamic linker can build the
	     descriptor for it.  */
	  if (h && h->dynindx == -1)
	    {
	      BFD_ASSERT ((h->root.type == bfd_link_hash_defined)
			  || (h->root.type == bfd_link_hash_defweak));

	      if (!bfd_elf_link_record_local_dynamic_symbol
		    (x->info, h->root.u.def.section->owner,
		     global_sym_index (h)))
		return FALSE;
	    }

	  dyn_i->want_fptr = 0;
	}
      else if (h == NULL || h->dynindx == -1)
	{
	  /* Local (or non-dynamic) symbol: allocate a 16-byte
	     descriptor here.  (Presumably the function-address/gp
	     pair of the IA-64 ABI -- confirm against the emitter.)  */
	  dyn_i->fptr_offset = x->ofs;
	  x->ofs += 16;
	}
      else
	/* Exported dynamic symbol: no locally allocated descriptor.  */
	dyn_i->want_fptr = 0;
    }
  return TRUE;
}
/* Allocate all the minimal PLT entries.  */

static bfd_boolean
allocate_plt_entries (struct elfNN_ia64_dyn_sym_info *dyn_i,
		      void * data)
{
  struct elfNN_ia64_allocate_data *x = (struct elfNN_ia64_allocate_data *) data;
  struct elf_link_hash_entry *h;
  bfd_size_type entry_ofs;

  if (!dyn_i->want_plt)
    return TRUE;

  /* Chase indirect and warning links down to the real symbol.  */
  h = dyn_i->h;
  if (h)
    while (h->root.type == bfd_link_hash_indirect
	   || h->root.type == bfd_link_hash_warning)
      h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* ??? Versioned symbols seem to lose NEEDS_PLT.  */
  if (!elfNN_ia64_dynamic_symbol_p (h, x->info, 0))
    {
      /* Not a dynamic symbol after all: drop every PLT request.  */
      dyn_i->want_plt = 0;
      dyn_i->want_plt2 = 0;
      return TRUE;
    }

  /* The first entry is placed just past the PLT header.  */
  entry_ofs = x->ofs;
  if (entry_ofs == 0)
    entry_ofs = PLT_HEADER_SIZE;

  dyn_i->plt_offset = entry_ofs;
  x->ofs = entry_ofs + PLT_MIN_ENTRY_SIZE;

  /* A minimal PLT entry always needs a PLTOFF slot as well.  */
  dyn_i->want_pltoff = 1;
  return TRUE;
}
/* Allocate all the full PLT entries.

   DYN_I is the per-symbol dynamic info entry; DATA is the shared
   elfNN_ia64_allocate_data cursor.  Always returns TRUE.  */

static bfd_boolean
allocate_plt2_entries (struct elfNN_ia64_dyn_sym_info *dyn_i,
		       void * data)
{
  struct elfNN_ia64_allocate_data *x = (struct elfNN_ia64_allocate_data *)data;

  if (dyn_i->want_plt2)
    {
      struct elf_link_hash_entry *h = dyn_i->h;
      bfd_size_type ofs = x->ofs;

      /* Reserve a full-sized entry at the current cursor.  */
      dyn_i->plt2_offset = ofs;
      x->ofs = ofs + PLT_FULL_ENTRY_SIZE;

      /* NOTE(review): H is resolved through indirect/warning links here
	 but never used afterwards -- the assignment below goes through
	 DYN_I->H directly.  Presumably harmless (want_plt2 appears to
	 imply dyn_i->h is non-NULL, since the loop dereferences it
	 unconditionally), but worth confirming against upstream.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;
      dyn_i->h->plt.offset = ofs;
    }
  return TRUE;
}
/* Allocate all the PLTOFF entries requested by relocations and