author     Ard Biesheuvel <ard.biesheuvel@linaro.org>	2016-01-26 09:13:44 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>	2016-02-24 14:57:27 +0000
commit     1e48ef7fcc374051730381a2a05da77eb4eafdb0
tree       f7f7174e996accf28dfdeef5fa79a452e4dbfa41	/arch/arm64/kernel/head.S
parent     6c94f27ac847ff8ef15b3da5b200574923bd6287
arm64: add support for building vmlinux as a relocatable PIE binary
This implements CONFIG_RELOCATABLE, which links the final vmlinux
image with a dynamic relocation section, allowing the early boot code
to perform a relocation to a different virtual address at runtime.
This is a prerequisite for KASLR (CONFIG_RANDOMIZE_BASE).
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
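The mechanism is straightforward to model in C: a dynamic relocation section is an array of Elf64_Rela records, and for the common R_AARCH64_RELATIVE type each record instructs the loader to store base + addend at base + r_offset. A minimal sketch of that idea, assuming caller-supplied table bounds (the function and its parameters are illustrative, not the kernel's implementation, which is the assembly shown in the diff below):

#include <elf.h>
#include <stdint.h>

/*
 * Sketch only: apply R_AARCH64_RELATIVE relocations for an image
 * loaded 'base' bytes away from the address it was linked at.
 */
static void apply_relative(Elf64_Rela *rela, Elf64_Rela *rela_end,
			   uint64_t base)
{
	for (; rela < rela_end; rela++) {
		if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE)
			continue;
		/* the relocated value is the addend plus the displacement */
		*(uint64_t *)(rela->r_offset + base) = rela->r_addend + base;
	}
}

In this commit the image is still processed at its link-time address, so the displacement is effectively zero; the point of the machinery is that KASLR (CONFIG_RANDOMIZE_BASE) can later apply a nonzero one.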
Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--	arch/arm64/kernel/head.S	32
1 file changed, 32 insertions, 0 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4cad8f9..4e69412 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -29,6 +29,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 #include <asm/cputype.h>
+#include <asm/elf.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
@@ -432,6 +433,37 @@ __mmap_switched:
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
+#ifdef CONFIG_RELOCATABLE
+
+	/*
+	 * Iterate over each entry in the relocation table, and apply the
+	 * relocations in place.
+	 */
+	adr_l	x8, __dynsym_start		// start of symbol table
+	adr_l	x9, __reloc_start		// start of reloc table
+	adr_l	x10, __reloc_end		// end of reloc table
+
+0:	cmp	x9, x10
+	b.hs	2f
+	ldp	x11, x12, [x9], #24
+	ldr	x13, [x9, #-8]
+	cmp	w12, #R_AARCH64_RELATIVE
+	b.ne	1f
+	str	x13, [x11]
+	b	0b
+
+1:	cmp	w12, #R_AARCH64_ABS64
+	b.ne	0b
+	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
+	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
+	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
+	add	x15, x13, x15
+	str	x15, [x11]
+	b	0b
+
+2:
+#endif
+
 	adr_l	sp, initial_sp, x4
 	mov	x4, sp
 	and	x4, x4, #~(THREAD_SIZE - 1)
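For readability, here is a C rendering of the relocation loop above (illustrative only, not kernel source). The ldp/ldr sequence loads the three 8-byte fields of an Elf64_Rela record: r_offset into x11, r_info into x12 (its low word w12 is the relocation type) and r_addend into x13. In the R_AARCH64_ABS64 path, the add/lsr pair computes 24 * ELF64_R_SYM(r_info), i.e. the byte offset of the referenced 24-byte Elf64_Sym:

#include <elf.h>
#include <stdint.h>

/* C equivalent of the loop between labels 0: and 2: above. */
static void relocate(Elf64_Rela *rel, Elf64_Rela *rel_end, Elf64_Sym *symtab)
{
	for (; rel < rel_end; rel++) {			/* 0: cmp x9, x10 */
		uint64_t *place = (uint64_t *)rel->r_offset;	/* x11 */

		switch (ELF64_R_TYPE(rel->r_info)) {		/* w12 */
		case R_AARCH64_RELATIVE:
			/* str x13, [x11]: the addend is the final value */
			*place = rel->r_addend;
			break;
		case R_AARCH64_ABS64:
			/* asm computes &symtab[sym] as x8 + (3*r_info >> 29) */
			*place = rel->r_addend +
				 symtab[ELF64_R_SYM(rel->r_info)].st_value;
			break;
		}
	}
}

The shift trick works because the symbol index occupies the upper 32 bits of r_info: (3 * r_info) >> 29 equals 24 * (r_info >> 32), with the type bits in the low word shifted out entirely. Relocation types other than the two handled here simply fall through, matching the b.ne 0b back-branches in the assembly.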