[6/6] LoongArch: Clean up la_abs macro
Commit Message
Now that la_abs can be replaced with la.pcrel everywhere, remove the la_abs macro together with its supporting machinery: the .la_abs section, the rela_la_abs record type, and relocate_la_abs().
Signed-off-by: Jinyang He <hejinyang@loongson.cn>
---
 arch/loongarch/include/asm/asmmacro.h   | 17 -----------------
 arch/loongarch/include/asm/setup.h      |  7 -------
 arch/loongarch/include/asm/stackframe.h |  2 +-
 arch/loongarch/kernel/entry.S           |  4 ++--
 arch/loongarch/kernel/genex.S           |  6 +++---
 arch/loongarch/kernel/relocate.c        | 28 ----------------------------
 arch/loongarch/kernel/vmlinux.lds.S     |  9 ---------
 arch/loongarch/mm/tlbex.S               | 14 +++++++-------
 8 files changed, 13 insertions(+), 74 deletions(-)
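Under CONFIG_RELOCATABLE, la_abs expanded to a four-instruction absolute sequence whose immediates had to be re-patched at boot (see the removed macro and relocate_la_abs() below), while la.pcrel is a two-instruction PC-relative sequence that stays correct wherever the image lands, since the whole kernel relocates as one unit. A minimal sketch of the two expansions, written with the LoongArch psABI relocation modifiers:

	# la.abs t0, sym: absolute address, needs boot-time re-patching
	lu12i.w   t0, %abs_hi20(sym)          # sym[31:12]
	ori       t0, t0, %abs_lo12(sym)      # sym[11:0]
	lu32i.d   t0, %abs64_lo20(sym)        # sym[51:32]
	lu52i.d   t0, t0, %abs64_hi12(sym)    # sym[63:52]

	# la.pcrel t0, sym: PC-relative, position-independent as emitted
	pcalau12i t0, %pc_hi20(sym)           # 4KiB page address of sym, from PC
	addi.d    t0, t0, %pc_lo12(sym)       # low 12 bits of sym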
Comments
On Fri, 2023-02-24 at 18:10 +0800, Jinyang He wrote:
/* snip */
> diff --git a/arch/loongarch/kernel/entry.S
> b/arch/loongarch/kernel/entry.S
> index ca4651f91e73..4de6b31dc3bf 100644
> --- a/arch/loongarch/kernel/entry.S
> +++ b/arch/loongarch/kernel/entry.S
/* snip */
> @@ -65,7 +65,7 @@ SYM_FUNC_START(handle_sys)
> and tp, tp, sp
>
> move a0, sp
> - la_abs ra, do_syscall
> + la.pcrel ra, do_syscall
> jirl ra, ra, 0
bl do_syscall
>
> RESTORE_ALL_AND_RET
> diff --git a/arch/loongarch/kernel/genex.S
> b/arch/loongarch/kernel/genex.S
> index 8705a7661ce9..b6a74246d1c4 100644
> --- a/arch/loongarch/kernel/genex.S
> +++ b/arch/loongarch/kernel/genex.S
/* snip */
> @@ -45,7 +45,7 @@ SYM_FUNC_START(handle_vint\idx)
> LONG_S t0, sp, PT_ERA
> 1: move a0, sp
> move a1, sp
> - la_abs t0, do_vint
> + la.pcrel t0, do_vint
> jirl ra, t0, 0
bl do_vint
> RESTORE_ALL_AND_RET
> SYM_FUNC_END(handle_vint\idx)
> @@ -76,7 +76,7 @@ SYM_FUNC_START(handle_\exception)
> SAVE_ALL
> build_prep_\prep
> move a0, sp
> - la_abs t0, do_\handler
> + la.pcrel t0, do_\handler
> jirl ra, t0, 0
bl do_\handler
/* snip */
> diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
> index 53321d3447a2..196d9bc870c5 100644
> --- a/arch/loongarch/mm/tlbex.S
> +++ b/arch/loongarch/mm/tlbex.S
> @@ -41,7 +41,7 @@ SYM_FUNC_START(handle_tlb_protect\idx)
> move a1, zero
> csrrd a2, LOONGARCH_CSR_BADV
> REG_S a2, sp, PT_BVADDR
> - la_abs t0, do_page_fault
> + la.pcrel t0, do_page_fault
> jirl ra, t0, 0
bl do_page_fault
/* snip */
> @@ -190,7 +190,7 @@ SYM_FUNC_START(handle_tlb_load\idx)
> 5: /* nopage_tlb_load: */
> dbar 0
> csrrd ra, EXCEPTION_KS2
> - la_abs t0, tlb_do_page_fault_0
> + la.pcrel t0, tlb_do_page_fault_0
> jr t0
b tlb_do_page_fault_0
/* snip */
> @@ -341,7 +341,7 @@ tlb_huge_update_store:
> nopage_tlb_store:
> dbar 0
> csrrd ra, EXCEPTION_KS2
> - la_abs t0, tlb_do_page_fault_1
> + la.pcrel t0, tlb_do_page_fault_1
> jr t0
b tlb_do_page_fault_1
/* snip */
> @@ -490,7 +490,7 @@ tlb_huge_update_modify:
> nopage_tlb_modify:
> dbar 0
> csrrd ra, EXCEPTION_KS2
> - la_abs t0, tlb_do_page_fault_1
> + la.pcrel t0, tlb_do_page_fault_1
> jr t0
b tlb_do_page_fault_1
> SYM_FUNC_END(handle_tlb_modify)
> .endm
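The bl/b forms suggested above are equivalent here and shorter: both encode a 26-bit PC-relative offset scaled by 4 (±128MiB reach, plenty for intra-kernel transfers), bl writes the return address to ra exactly as jirl ra does, and b covers the jr tail jumps. A minimal sketch of the two call forms:

	# two instructions, through a scratch register:
	la.pcrel  t0, do_vint
	jirl      ra, t0, 0      # ra <- PC + 4, then jump to t0

	# one instruction, no scratch register:
	bl        do_vint        # ra <- PC + 4, PC += sign_ext(offs26 << 2)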
On 2023/2/24 18:43, Xi Ruoyao wrote:
> /* snip */
Thanks, I'll check other places, too.
Jinyang
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -667,21 +667,4 @@
nor \dst, \src, zero
.endm
-.macro la_abs reg, sym
-#ifndef CONFIG_RELOCATABLE
- la.abs \reg, \sym
-#else
- 766:
- lu12i.w \reg, 0
- ori \reg, \reg, 0
- lu32i.d \reg, 0
- lu52i.d \reg, \reg, 0
- .pushsection ".la_abs", "aw", %progbits
- 768:
- .dword 768b-766b
- .dword \sym
- .popsection
-#endif
-.endm
-
#endif /* _ASM_ASMMACRO_H */
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -16,13 +16,6 @@ extern void per_cpu_trap_init(int cpu);
#ifdef CONFIG_RELOCATABLE
-struct rela_la_abs {
- long offset;
- long symvalue;
-};
-
-extern long __la_abs_begin;
-extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -86,7 +86,7 @@
* new value in sp.
*/
.macro get_saved_sp docfi=0
- la_abs t1, kernelsp
+ la.pcrel t1, kernelsp
#ifdef CONFIG_SMP
csrrd t0, PERCPU_BASE_KS
LONG_ADD t1, t1, t0
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index ca4651f91e73..4de6b31dc3bf 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -22,7 +22,7 @@
.align 5
SYM_FUNC_START(handle_sys)
csrrd t0, PERCPU_BASE_KS
- la_abs t1, kernelsp
+ la.pcrel t1, kernelsp
add.d t1, t1, t0
move t2, sp
ld.d sp, t1, 0
@@ -65,7 +65,7 @@ SYM_FUNC_START(handle_sys)
and tp, tp, sp
move a0, sp
- la_abs ra, do_syscall
+ la.pcrel ra, do_syscall
jirl ra, ra, 0
RESTORE_ALL_AND_RET
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 8705a7661ce9..b6a74246d1c4 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -36,7 +36,7 @@ SYM_FUNC_END(__arch_cpu_idle)
SYM_FUNC_START(handle_vint\idx)
BACKUP_T0T1
SAVE_ALL
- la_abs t1, __arch_cpu_idle
+ la.pcrel t1, __arch_cpu_idle
LONG_L t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
@@ -45,7 +45,7 @@ SYM_FUNC_START(handle_vint\idx)
LONG_S t0, sp, PT_ERA
1: move a0, sp
move a1, sp
- la_abs t0, do_vint
+ la.pcrel t0, do_vint
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_vint\idx)
@@ -76,7 +76,7 @@ SYM_FUNC_START(handle_\exception)
SAVE_ALL
build_prep_\prep
move a0, sp
- la_abs t0, do_\handler
+ la.pcrel t0, do_\handler
jirl ra, t0, 0
UNW_NEED_RESET
RESTORE_ALL_AND_RET
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -12,7 +12,6 @@
#include <linux/start_kernel.h>
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
-#include <asm/inst.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -41,31 +40,6 @@ static inline __init void relocate_relative(void)
}
}
-static inline void __init relocate_la_abs(long random_offset)
-{
- void *begin, *end;
- struct rela_la_abs *p;
-
- begin = RELOCATED_KASLR(&__la_abs_begin);
- end = RELOCATED_KASLR(&__la_abs_end);
-
- for (p = begin; (void *)p < end; p++) {
- long v = p->symvalue;
- uint32_t lu12iw, ori, lu32id, lu52id;
- union loongarch_instruction *insn = (void *)p - p->offset;
-
- lu12iw = (v >> 12) & 0xfffff;
- ori = v & 0xfff;
- lu32id = (v >> 32) & 0xfffff;
- lu52id = v >> 52;
-
- insn[0].reg1i20_format.immediate = lu12iw;
- insn[1].reg2i12_format.immediate = ori;
- insn[2].reg1i20_format.immediate = lu32id;
- insn[3].reg2i12_format.immediate = lu52id;
- }
-}
-
#ifdef CONFIG_RANDOMIZE_BASE
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
@@ -202,8 +176,6 @@ void * __init relocate_kernel(void)
if (reloc_offset)
relocate_relative();
- relocate_la_abs(random_offset);
-
return kernel_entry;
}
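In the removed relocate_la_abs(), insn = (void *)p - p->offset recovers the address of the lu12i.w because the macro recorded offset as 768b-766b, the forward distance from that instruction to its .la_abs record. The four immediates then re-encode the relocated symbol value. A worked example of the split, using a made-up kernel virtual address:

	# v = 0x9000000001c0a000 (hypothetical, for illustration only)
	lu12i.w   t0, 0x01c0a       # (v >> 12) & 0xfffff -> bits 31:12
	ori       t0, t0, 0x000     # v & 0xfff           -> bits 11:0
	lu32i.d   t0, 0x00000       # (v >> 32) & 0xfffff -> bits 51:32
	lu52i.d   t0, t0, 0x900     # v >> 52             -> bits 63:52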
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -91,15 +91,6 @@ SECTIONS
__alt_instructions_end = .;
}
-#ifdef CONFIG_RELOCATABLE
- . = ALIGN(8);
- .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
- __la_abs_begin = .;
- *(.la_abs)
- __la_abs_end = .;
- }
-#endif
-
.got : ALIGN(16) { *(.got) }
.plt : ALIGN(16) { *(.plt) }
.got.plt : ALIGN(16) { *(.got.plt) }
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index 53321d3447a2..196d9bc870c5 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -41,7 +41,7 @@ SYM_FUNC_START(handle_tlb_protect\idx)
move a1, zero
csrrd a2, LOONGARCH_CSR_BADV
REG_S a2, sp, PT_BVADDR
- la_abs t0, do_page_fault
+ la.pcrel t0, do_page_fault
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect\idx)
@@ -119,7 +119,7 @@ SYM_FUNC_START(handle_tlb_load\idx)
#ifdef CONFIG_64BIT
3: /* vmalloc_load: */
- la_abs t1, swapper_pg_dir
+ la.pcrel t1, swapper_pg_dir
b 1b
#endif
@@ -190,7 +190,7 @@ SYM_FUNC_START(handle_tlb_load\idx)
5: /* nopage_tlb_load: */
dbar 0
csrrd ra, EXCEPTION_KS2
- la_abs t0, tlb_do_page_fault_0
+ la.pcrel t0, tlb_do_page_fault_0
jr t0
SYM_FUNC_END(handle_tlb_load\idx)
.endm
@@ -268,7 +268,7 @@ smp_pgtable_change_store:
#ifdef CONFIG_64BIT
vmalloc_store:
- la_abs t1, swapper_pg_dir
+ la.pcrel t1, swapper_pg_dir
b vmalloc_done_store
#endif
@@ -341,7 +341,7 @@ tlb_huge_update_store:
nopage_tlb_store:
dbar 0
csrrd ra, EXCEPTION_KS2
- la_abs t0, tlb_do_page_fault_1
+ la.pcrel t0, tlb_do_page_fault_1
jr t0
SYM_FUNC_END(handle_tlb_store)
.endm
@@ -418,7 +418,7 @@ smp_pgtable_change_modify:
#ifdef CONFIG_64BIT
vmalloc_modify:
- la_abs t1, swapper_pg_dir
+ la.pcrel t1, swapper_pg_dir
b vmalloc_done_modify
#endif
@@ -490,7 +490,7 @@ tlb_huge_update_modify:
nopage_tlb_modify:
dbar 0
csrrd ra, EXCEPTION_KS2
- la_abs t0, tlb_do_page_fault_1
+ la.pcrel t0, tlb_do_page_fault_1
jr t0
SYM_FUNC_END(handle_tlb_modify)
.endm