author     Xi Ruoyao <xry111@xry111.site>          2023-02-25 15:52:56 +0800
committer  Huacai Chen <chenhuacai@loongson.cn>    2023-02-25 22:12:16 +0800
commit     f733f119e9b31088063652a7ad16963d85cb73dd (patch)
tree       24074917ede441e0f79a841c9cd061e802f90625 /arch
parent     41596803302d83a67a80dc1efef4e51ac46acabb (diff)
LoongArch: Use la.pcrel instead of la.abs when it's trivially possible
Let's start to kill la.abs in preparation for the subsequent support of
the PIE kernel.
While at it, re-tab the indentation in arch/loongarch/kernel/entry.S for alignment.
Signed-off-by: Xi Ruoyao <xry111@xry111.site>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
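For reference, the difference between the two pseudo-instructions is what they ask the assembler to emit. A rough sketch for 64-bit LoongArch, using the kernelsp symbol touched by this patch (the exact expansion is chosen by the toolchain):

        # la.abs: materialize the absolute, link-time address piece by piece.
        # Only correct when the kernel actually runs at its linked address.
        lu12i.w   t1, %abs_hi20(kernelsp)
        ori       t1, t1, %abs_lo12(kernelsp)
        lu32i.d   t1, %abs64_lo20(kernelsp)
        lu52i.d   t1, t1, %abs64_hi12(kernelsp)

        # la.pcrel: compute the address relative to the current PC (reach is
        # about +/-2 GiB), so it stays correct wherever the image is loaded,
        # which is what a PIE kernel needs.
        pcalau12i t1, %pc_hi20(kernelsp)
        addi.d    t1, t1, %pc_lo12(kernelsp)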
Diffstat (limited to 'arch')
-rw-r--r--  arch/loongarch/include/asm/stackframe.h |  2
-rw-r--r--  arch/loongarch/include/asm/uaccess.h    |  1
-rw-r--r--  arch/loongarch/kernel/entry.S           | 90
-rw-r--r--  arch/loongarch/kernel/head.S            |  2
-rw-r--r--  arch/loongarch/mm/tlbex.S               |  3
5 files changed, 48 insertions, 50 deletions
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 4ca953062b5b..7deb043ce387 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -90,7 +90,7 @@
         .endm
 
         .macro  set_saved_sp stackp temp temp2
-        la.abs  \temp, kernelsp
+        la.pcrel \temp, kernelsp
 #ifdef CONFIG_SMP
         LONG_ADD \temp, \temp, u0
 #endif
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 255899d4a7c3..0d22991ae430 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -22,7 +22,6 @@ extern u64 __ua_limit;
 
 #define __UA_ADDR       ".dword"
-#define __UA_LA         "la.abs"
 #define __UA_LIMIT      __ua_limit
 
 /*
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index d53b631c9022..f14ab6a0e0b0 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -19,70 +19,70 @@
         .cfi_sections  .debug_frame
         .align         5
 SYM_FUNC_START(handle_syscall)
-        csrrd   t0, PERCPU_BASE_KS
-        la.abs  t1, kernelsp
-        add.d   t1, t1, t0
-        move    t2, sp
-        ld.d    sp, t1, 0
+        csrrd           t0, PERCPU_BASE_KS
+        la.pcrel        t1, kernelsp
+        add.d           t1, t1, t0
+        move            t2, sp
+        ld.d            sp, t1, 0
 
-        addi.d  sp, sp, -PT_SIZE
-        cfi_st  t2, PT_R3
+        addi.d          sp, sp, -PT_SIZE
+        cfi_st          t2, PT_R3
         cfi_rel_offset  sp, PT_R3
-        st.d    zero, sp, PT_R0
-        csrrd   t2, LOONGARCH_CSR_PRMD
-        st.d    t2, sp, PT_PRMD
-        csrrd   t2, LOONGARCH_CSR_CRMD
-        st.d    t2, sp, PT_CRMD
-        csrrd   t2, LOONGARCH_CSR_EUEN
-        st.d    t2, sp, PT_EUEN
-        csrrd   t2, LOONGARCH_CSR_ECFG
-        st.d    t2, sp, PT_ECFG
-        csrrd   t2, LOONGARCH_CSR_ESTAT
-        st.d    t2, sp, PT_ESTAT
-        cfi_st  ra, PT_R1
-        cfi_st  a0, PT_R4
-        cfi_st  a1, PT_R5
-        cfi_st  a2, PT_R6
-        cfi_st  a3, PT_R7
-        cfi_st  a4, PT_R8
-        cfi_st  a5, PT_R9
-        cfi_st  a6, PT_R10
-        cfi_st  a7, PT_R11
-        csrrd   ra, LOONGARCH_CSR_ERA
-        st.d    ra, sp, PT_ERA
+        st.d            zero, sp, PT_R0
+        csrrd           t2, LOONGARCH_CSR_PRMD
+        st.d            t2, sp, PT_PRMD
+        csrrd           t2, LOONGARCH_CSR_CRMD
+        st.d            t2, sp, PT_CRMD
+        csrrd           t2, LOONGARCH_CSR_EUEN
+        st.d            t2, sp, PT_EUEN
+        csrrd           t2, LOONGARCH_CSR_ECFG
+        st.d            t2, sp, PT_ECFG
+        csrrd           t2, LOONGARCH_CSR_ESTAT
+        st.d            t2, sp, PT_ESTAT
+        cfi_st          ra, PT_R1
+        cfi_st          a0, PT_R4
+        cfi_st          a1, PT_R5
+        cfi_st          a2, PT_R6
+        cfi_st          a3, PT_R7
+        cfi_st          a4, PT_R8
+        cfi_st          a5, PT_R9
+        cfi_st          a6, PT_R10
+        cfi_st          a7, PT_R11
+        csrrd           ra, LOONGARCH_CSR_ERA
+        st.d            ra, sp, PT_ERA
         cfi_rel_offset  ra, PT_ERA
 
-        cfi_st  tp, PT_R2
-        cfi_st  u0, PT_R21
-        cfi_st  fp, PT_R22
+        cfi_st          tp, PT_R2
+        cfi_st          u0, PT_R21
+        cfi_st          fp, PT_R22
         SAVE_STATIC
 
-        move    u0, t0
-        li.d    tp, ~_THREAD_MASK
-        and     tp, tp, sp
+        move            u0, t0
+        li.d            tp, ~_THREAD_MASK
+        and             tp, tp, sp
 
-        move    a0, sp
-        bl      do_syscall
+        move            a0, sp
+        bl              do_syscall
 
         RESTORE_ALL_AND_RET
 SYM_FUNC_END(handle_syscall)
 
 SYM_CODE_START(ret_from_fork)
-        bl      schedule_tail           # a0 = struct task_struct *prev
-        move    a0, sp
-        bl      syscall_exit_to_user_mode
+        bl              schedule_tail           # a0 = struct task_struct *prev
+        move            a0, sp
+        bl              syscall_exit_to_user_mode
         RESTORE_STATIC
         RESTORE_SOME
         RESTORE_SP_AND_RET
 SYM_CODE_END(ret_from_fork)
 
 SYM_CODE_START(ret_from_kernel_thread)
-        bl      schedule_tail           # a0 = struct task_struct *prev
-        move    a0, s1
-        jirl    ra, s0, 0
-        move    a0, sp
-        bl      syscall_exit_to_user_mode
+        bl              schedule_tail           # a0 = struct task_struct *prev
+        move            a0, s1
+        jirl            ra, s0, 0
+        move            a0, sp
+        bl              syscall_exit_to_user_mode
         RESTORE_STATIC
         RESTORE_SOME
         RESTORE_SP_AND_RET
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 57bada6b4e93..aa6181714ec3 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -117,7 +117,7 @@ SYM_CODE_START(smpboot_entry)
         li.w            t0, 0x00                # FPE=0, SXE=0, ASXE=0, BTE=0
         csrwr           t0, LOONGARCH_CSR_EUEN
 
-        la.abs          t0, cpuboot_data
+        la.pcrel        t0, cpuboot_data
         ld.d            sp, t0, CPU_BOOT_STACK
         ld.d            tp, t0, CPU_BOOT_TINFO
 
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index 58781c6e4191..3dd2a9615cd9 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -24,8 +24,7 @@
         move            a0, sp
         REG_S           a2, sp, PT_BVADDR
         li.w            a1, \write
-        la.abs          t0, do_page_fault
-        jirl            ra, t0, 0
+        bl              do_page_fault
         RESTORE_ALL_AND_RET
         SYM_FUNC_END(tlb_do_page_fault_\write)
         .endm
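The tlbex.S hunk goes one step further and drops the address materialization altogether: do_page_fault is now reached with a plain bl, which already encodes a PC-relative offset (26 bits, roughly +/-128 MiB of reach), so it is position-independent by construction. A minimal before/after sketch (the comments are illustrative, not part of the patch):

        # before: build the absolute address of do_page_fault, then call indirectly
        la.abs  t0, do_page_fault
        jirl    ra, t0, 0

        # after: a single PC-relative call; valid at any load address as long as
        # the callee stays within bl's +/-128 MiB range (true inside the kernel image)
        bl      do_page_fault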