@@ -2354,6 +2354,9 @@ elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
name can be changed. The only requirement is the %s be present. */
#define STUB_ENTRY_NAME "__%s_veneer"
+/* Stub name for a BTI landing stub. */
+#define BTI_STUB_ENTRY_NAME "__%s_bti_veneer"
+
/* The name of the dynamic interpreter. This is put in the .interp
section. */
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
@@ -2406,6 +2409,12 @@ static const uint32_t aarch64_long_branch_stub[] =
0x00000000,
};
+static const uint32_t aarch64_bti_direct_branch_stub[] =
+{
+ 0xd503245f, /* bti c */
+ 0x14000000, /* b <label> */
+};
+
static const uint32_t aarch64_erratum_835769_stub[] =
{
0x00000000, /* Placeholder for multiply accumulate. */
@@ -2427,6 +2436,7 @@ enum elf_aarch64_stub_type
aarch64_stub_none,
aarch64_stub_adrp_branch,
aarch64_stub_long_branch,
+ aarch64_stub_bti_direct_branch,
aarch64_stub_erratum_835769_veneer,
aarch64_stub_erratum_843419_veneer,
};
@@ -2455,6 +2465,9 @@ struct elf_aarch64_stub_hash_entry
/* Destination symbol type */
unsigned char st_type;
+ /* The target is also a stub. */
+ bool double_stub;
+
/* Where this stub is being called from, or, in the case of combined
stub sections, the first input section in the group. */
asection *id_sec;
@@ -2679,6 +2692,11 @@ struct elf_aarch64_link_hash_table
unsigned int top_index;
asection **input_list;
+ /* True when two stubs are added where one targets the other, which
+    happens when BTI stubs are inserted.  In that case the stub layout
+    must not change during elfNN_aarch64_build_stubs. */
+ bool has_double_stub;
+
/* JUMP_SLOT relocs for variant PCS symbols may be present. */
int variant_pcs;
@@ -2974,14 +2992,6 @@ aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
howto, value) == bfd_reloc_ok;
}
-static enum elf_aarch64_stub_type
-aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
-{
- if (aarch64_valid_for_adrp_p (value, place))
- return aarch64_stub_adrp_branch;
- return aarch64_stub_long_branch;
-}
-
/* Determine the type of stub needed, if any, for a call. */
static enum elf_aarch64_stub_type
@@ -3251,14 +3261,17 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
bfd_vma veneer_entry_loc;
bfd_signed_vma branch_offset = 0;
unsigned int template_size;
+ unsigned int pad_size = 0;
const uint32_t *template;
unsigned int i;
struct bfd_link_info *info;
+ struct elf_aarch64_link_hash_table *htab;
/* Massage our args to the form they really have. */
stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
info = (struct bfd_link_info *) in_arg;
+ htab = elf_aarch64_hash_table (info);
/* Fail if the target section could not be assigned to an output
section. The user should fix his linker script. */
@@ -3271,6 +3284,10 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
stub_sec = stub_entry->stub_sec;
+ /* The layout must not change when a stub may be the target of another. */
+ if (htab->has_double_stub)
+ BFD_ASSERT (stub_entry->stub_offset == stub_sec->size);
+
/* Make a note of the offset within the stubs for this entry. */
stub_entry->stub_offset = stub_sec->size;
loc = stub_sec->contents + stub_entry->stub_offset;
@@ -3289,7 +3306,14 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
/* See if we can relax the stub. */
if (aarch64_valid_for_adrp_p (sym_value, place))
- stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
+ {
+ stub_entry->stub_type = aarch64_stub_adrp_branch;
+
+ /* Avoid the relaxation changing the layout. */
+ if (htab->has_double_stub)
+ pad_size = sizeof (aarch64_long_branch_stub)
+ - sizeof (aarch64_adrp_branch_stub);
+ }
}
switch (stub_entry->stub_type)
@@ -3302,6 +3326,10 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
template = aarch64_long_branch_stub;
template_size = sizeof (aarch64_long_branch_stub);
break;
+ case aarch64_stub_bti_direct_branch:
+ template = aarch64_bti_direct_branch_stub;
+ template_size = sizeof (aarch64_bti_direct_branch_stub);
+ break;
case aarch64_stub_erratum_835769_veneer:
template = aarch64_erratum_835769_stub;
template_size = sizeof (aarch64_erratum_835769_stub);
@@ -3320,6 +3348,7 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
loc += 4;
}
+ template_size += pad_size;
template_size = (template_size + 7) & ~7;
stub_sec->size += template_size;
@@ -3345,6 +3374,12 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
BFD_FAIL ();
break;
+ case aarch64_stub_bti_direct_branch:
+ if (!aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
+ stub_entry->stub_offset + 4, sym_value))
+ BFD_FAIL ();
+ break;
+
case aarch64_stub_erratum_835769_veneer:
veneered_insn_loc = stub_entry->target_section->output_section->vma
+ stub_entry->target_section->output_offset
@@ -3375,7 +3410,8 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
}
/* As above, but don't actually build the stub. Just bump offset so
- we know stub section sizes. */
+   we know stub section sizes and record the offset of each stub so
+   that one stub can target another (needed for BTI direct branch stubs). */
static bool
aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
@@ -3396,6 +3432,9 @@ aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
case aarch64_stub_long_branch:
size = sizeof (aarch64_long_branch_stub);
break;
+ case aarch64_stub_bti_direct_branch:
+ size = sizeof (aarch64_bti_direct_branch_stub);
+ break;
case aarch64_stub_erratum_835769_veneer:
size = sizeof (aarch64_erratum_835769_stub);
break;
@@ -3411,10 +3450,20 @@ aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
}
size = (size + 7) & ~7;
+ stub_entry->stub_offset = stub_entry->stub_sec->size;
stub_entry->stub_sec->size += size;
return true;
}
+/* True if the output is BTI compatible. */
+
+static bool
+elf_aarch64_bti_p (bfd *output_bfd)
+{
+ uint32_t prop = elf_aarch64_tdata (output_bfd)->gnu_and_prop;
+ return prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
+}
+
/* External entry points for sizing and building linker stubs. */
/* Set up various things so that we can make a list of input sections
@@ -3621,6 +3670,16 @@ group_sections (struct elf_aarch64_link_hash_table *htab,
#undef PREV_SEC
#undef PREV_SEC
+/* True if the inserted stub does not break BTI compatibility. */
+
+static bool
+aarch64_bti_stub_p (struct elf_aarch64_stub_hash_entry *stub_entry)
+{
+  /* Stubs without an indirect branch are BTI compatible. */
+ return stub_entry->stub_type != aarch64_stub_adrp_branch
+ && stub_entry->stub_type != aarch64_stub_long_branch;
+}
+
#define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
#define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
@@ -4080,7 +4139,10 @@ _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
/* Ignore non-stub sections. */
if (!strstr (section->name, STUB_SUFFIX))
continue;
- section->size = 0;
+
+ /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
+ as long branch stubs contain a 64-bit address. */
+ section->size = 8;
}
bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
@@ -4091,10 +4153,9 @@ _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
if (!strstr (section->name, STUB_SUFFIX))
continue;
- /* Add space for a branch. Add 8 bytes to keep section 8 byte aligned,
- as long branch stubs contain a 64-bit address. */
- if (section->size)
- section->size += 8;
+ /* Empty stub section. */
+ if (section->size == 8)
+ section->size = 0;
/* Ensure all stub sections have a size which is a multiple of
4096. This is important in order to ensure that the insertion
@@ -4276,6 +4337,7 @@ _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
struct bfd_link_info *info)
{
struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ bool need_bti = elf_aarch64_bti_p (output_bfd);
bfd *input_bfd;
for (input_bfd = info->input_bfds; input_bfd != NULL;
@@ -4327,13 +4389,16 @@ _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
unsigned int r_type, r_indx;
enum elf_aarch64_stub_type stub_type;
struct elf_aarch64_stub_hash_entry *stub_entry;
+ struct elf_aarch64_stub_hash_entry *stub_entry_bti;
asection *sym_sec;
bfd_vma sym_value;
bfd_vma destination;
struct elf_aarch64_link_hash_entry *hash;
const char *sym_name;
char *stub_name;
+ char *stub_name_bti;
const asection *id_sec;
+ const asection *id_sec_bti;
unsigned char st_type;
bfd_size_type len;
@@ -4498,6 +4563,25 @@ _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
/* Always update this stub's target since it may have
changed after layout. */
stub_entry->target_value = sym_value + irela->r_addend;
+
+ if (stub_entry->double_stub)
+ {
+ /* Update the target of both stubs. */
+
+ id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
+ stub_name_bti =
+ elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash,
+ irela);
+ if (!stub_name_bti)
+ goto error_ret_free_internal;
+ stub_entry_bti =
+ aarch64_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name_bti, false, false);
+ BFD_ASSERT (stub_entry_bti != NULL);
+ free (stub_name_bti);
+ stub_entry_bti->target_value = stub_entry->target_value;
+ stub_entry->target_value = stub_entry_bti->stub_offset;
+ }
continue;
}
@@ -4528,6 +4612,60 @@ _bfd_aarch64_add_call_stub_entries (bool *stub_changed, bfd *output_bfd,
snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
sym_name);
+ /* A stub with an indirect jump may break BTI compatibility, so in
+    that case insert another stub with a direct jump near the target. */
+ if (need_bti && !aarch64_bti_stub_p (stub_entry))
+ {
+ stub_entry->double_stub = true;
+ htab->has_double_stub = true;
+ id_sec_bti = htab->stub_group[sym_sec->id].link_sec;
+ stub_name_bti =
+ elfNN_aarch64_stub_name (id_sec_bti, sym_sec, hash, irela);
+ if (!stub_name_bti)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ stub_entry_bti =
+ aarch64_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name_bti, false, false);
+ if (stub_entry_bti == NULL)
+ stub_entry_bti =
+ _bfd_aarch64_add_stub_entry_in_group (stub_name_bti,
+ sym_sec, htab);
+ if (stub_entry_bti == NULL)
+ {
+ free (stub_name);
+ free (stub_name_bti);
+ goto error_ret_free_internal;
+ }
+
+ stub_entry_bti->target_value = sym_value + irela->r_addend;
+ stub_entry_bti->target_section = sym_sec;
+ stub_entry_bti->stub_type = aarch64_stub_bti_direct_branch;
+ stub_entry_bti->h = hash;
+ stub_entry_bti->st_type = st_type;
+
+ len = sizeof (BTI_STUB_ENTRY_NAME) + strlen (sym_name);
+ stub_entry_bti->output_name = bfd_alloc (htab->stub_bfd, len);
+ if (stub_entry_bti->output_name == NULL)
+ {
+ free (stub_name);
+ free (stub_name_bti);
+ goto error_ret_free_internal;
+ }
+ snprintf (stub_entry_bti->output_name, len,
+ BTI_STUB_ENTRY_NAME, sym_name);
+
+ /* Update the indirect call stub to target the BTI stub. */
+ stub_entry->target_value = 0;
+ stub_entry->target_section = stub_entry_bti->stub_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = NULL;
+ stub_entry->st_type = STT_FUNC;
+ }
+
*stub_changed = true;
}
@@ -8443,6 +8581,13 @@ aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
return false;
break;
+ case aarch64_stub_bti_direct_branch:
+ if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
+ sizeof (aarch64_bti_direct_branch_stub)))
+ return false;
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
+ return false;
+ break;
case aarch64_stub_erratum_835769_veneer:
if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
sizeof (aarch64_erratum_835769_stub)))
@@ -461,6 +461,9 @@ run_dump_test "bti-warn"
run_dump_test "weak-tls"
run_dump_test "undef-tls"
+run_dump_test "bti-far-1"
+run_dump_test "bti-far-2"
+
if { ![skip_sframe_tests] } {
run_dump_test "sframe-simple-1"
}
new file mode 100644
@@ -0,0 +1,83 @@
+#name: Check that linker stubs with indirect calls handle BTI (shared lib).
+#source: bti-far.s
+#target: [check_shared_lib_support]
+#as: -mabi=lp64
+#ld: -shared -T bti-far.ld
+#objdump: -dr
+
+[^:]*: *file format elf64-.*aarch64
+
+
+Disassembly of section \.plt:
+
+0000000000018000 <\.plt>:
+ 18000: d503245f bti c
+ 18004: a9bf7bf0 stp x16, x30, \[sp, #-16\]!
+ 18008: 900000d0 adrp x16, 30000 <_GLOBAL_OFFSET_TABLE_>
+ 1800c: f9400e11 ldr x17, \[x16, #24\]
+ 18010: 91006210 add x16, x16, #0x18
+ 18014: d61f0220 br x17
+ 18018: d503201f nop
+ 1801c: d503201f nop
+
+0000000000018020 <foo@plt>:
+ 18020: 900000d0 adrp x16, 30000 <_GLOBAL_OFFSET_TABLE_>
+ 18024: f9401211 ldr x17, \[x16, #32\]
+ 18028: 91008210 add x16, x16, #0x20
+ 1802c: d61f0220 br x17
+ 18030: 14000004 b 18040 <__foo_bti_veneer\+0x8>
+ 18034: d503201f nop
+
+0000000000018038 <__foo_bti_veneer>:
+ 18038: d503245f bti c
+ 1803c: 17fffff9 b 18020 <foo@plt>
+
+Disassembly of section \.text:
+
+0000000000020000 <_start>:
+ 20000: 97ffe008 bl 18020 <foo@plt>
+ 20004: 94000007 bl 20020 <___veneer>
+ 20008: 94000001 bl 2000c <baz>
+
+000000000002000c <baz>:
+ 2000c: d65f03c0 ret
+ 20010: 1400000a b 20038 <___veneer\+0x18>
+ 20014: d503201f nop
+
+0000000000020018 <___bti_veneer>:
+ 20018: d503245f bti c
+ 2001c: 17fffffc b 2000c <baz>
+
+0000000000020020 <___veneer>:
+ 20020: 90091910 adrp x16, 12340000 <foo>
+ 20024: 91012210 add x16, x16, #0x48
+ 20028: d61f0200 br x16
+ \.\.\.
+
+Disassembly of section \.far:
+
+0000000012340000 <foo>:
+ 12340000: 9400000c bl 12340030 <___veneer>
+
+0000000012340004 <bar>:
+ 12340004: 9400000b bl 12340030 <___veneer>
+ 12340008: 94000004 bl 12340018 <__foo_veneer>
+ 1234000c: 00000000 udf #0
+ 12340010: 14000010 b 12340050 <___bti_veneer\+0x8>
+ 12340014: d503201f nop
+
+0000000012340018 <__foo_veneer>:
+ 12340018: 90f6e6d0 adrp x16, 18000 <\.plt>
+ 1234001c: 9100e210 add x16, x16, #0x38
+ 12340020: d61f0200 br x16
+ \.\.\.
+
+0000000012340030 <___veneer>:
+ 12340030: 90f6e710 adrp x16, 20000 <_start>
+ 12340034: 91006210 add x16, x16, #0x18
+ 12340038: d61f0200 br x16
+ \.\.\.
+
+0000000012340048 <___bti_veneer>:
+ 12340048: d503245f bti c
+ 1234004c: 17ffffee b 12340004 <bar>
new file mode 100644
@@ -0,0 +1,62 @@
+#name: Check that linker stubs with indirect calls handle BTI (exe).
+#source: bti-far.s
+#as: -mabi=lp64
+#ld: -T bti-far.ld
+#objdump: -dr
+
+[^:]*: *file format elf64-.*aarch64
+
+
+Disassembly of section \.text:
+
+0000000000020000 <_start>:
+ 20000: 9400000e bl 20038 <__foo_veneer>
+ 20004: 94000007 bl 20020 <___veneer>
+ 20008: 94000001 bl 2000c <baz>
+
+000000000002000c <baz>:
+ 2000c: d65f03c0 ret
+ 20010: 14000010 b 20050 <__foo_veneer\+0x18>
+ 20014: d503201f nop
+
+0000000000020018 <___bti_veneer>:
+ 20018: d503245f bti c
+ 2001c: 17fffffc b 2000c <baz>
+
+0000000000020020 <___veneer>:
+ 20020: 90091910 adrp x16, 12340000 <foo>
+ 20024: 9100e210 add x16, x16, #0x38
+ 20028: d61f0200 br x16
+ \.\.\.
+
+0000000000020038 <__foo_veneer>:
+ 20038: 90091910 adrp x16, 12340000 <foo>
+ 2003c: 91006210 add x16, x16, #0x18
+ 20040: d61f0200 br x16
+ \.\.\.
+
+Disassembly of section \.far:
+
+0000000012340000 <foo>:
+ 12340000: 94000008 bl 12340020 <___veneer>
+
+0000000012340004 <bar>:
+ 12340004: 94000007 bl 12340020 <___veneer>
+ 12340008: 97fffffe bl 12340000 <foo>
+ 1234000c: 00000000 udf #0
+ 12340010: 1400000c b 12340040 <___bti_veneer\+0x8>
+ 12340014: d503201f nop
+
+0000000012340018 <__foo_bti_veneer>:
+ 12340018: d503245f bti c
+ 1234001c: 17fffff9 b 12340000 <foo>
+
+0000000012340020 <___veneer>:
+ 12340020: 90f6e710 adrp x16, 20000 <_start>
+ 12340024: 91006210 add x16, x16, #0x18
+ 12340028: d61f0200 br x16
+ \.\.\.
+
+0000000012340038 <___bti_veneer>:
+ 12340038: d503245f bti c
+ 1234003c: 17fffff2 b 12340004 <bar>
new file mode 100644
@@ -0,0 +1,15 @@
+OUTPUT_ARCH(aarch64)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x10000;
+ .rela.plt : { *(.rela.plt) *(.rela.iplt) }
+ . = 0x18000;
+ .plt : { *(.plt) *(.iplt) }
+ . = 0x20000;
+ .text : { *(.text) }
+ . = 0x30000;
+ .got : { *(.got) *(.got.plt) }
+ . = 0x12340000;
+ .far : { *(.far) }
+}
new file mode 100644
@@ -0,0 +1,29 @@
+ .text
+ .global _start
+ .type _start, %function
+_start:
+ bl foo
+ bl bar
+ bl baz
+baz:
+ ret
+
+ .section .far,"ax",@progbits
+ .global foo
+ .type foo, %function
+foo:
+ bl baz
+bar:
+ bl baz
+ bl foo
+
+ .section .note.gnu.property,"a"
+ .align 3
+ .word 4		/* namesz ("GNU" plus NUL).  */
+ .word 16		/* descsz.  */
+ .word 5		/* type: NT_GNU_PROPERTY_TYPE_0.  */
+ .string "GNU"
+ .word 0xc0000000	/* pr_type: GNU_PROPERTY_AARCH64_FEATURE_1_AND.  */
+ .word 4		/* pr_datasz.  */
+ .word 1		/* GNU_PROPERTY_AARCH64_FEATURE_1_BTI.  */
+ .align 3