From 02041b32256628aef0d18ec15d3658fe41bc1afe Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 11 Apr 2022 16:10:29 -0700
Subject: x86/uaccess: Don't jump between functions

For unwinding sanity, a function shouldn't jump to the middle of
another function.

Move the short string user copy code out to a separate non-function
code snippet.

Signed-off-by: Josh Poimboeuf
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/9519e4853148b765e047967708f2b61e56c93186.1649718562.git.jpoimboe@redhat.com
---
 arch/x86/lib/copy_user_64.S | 87 +++++++++++++++++++++++++++------------------
 1 file changed, 52 insertions(+), 35 deletions(-)

diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 8ca5ecf16dc4..9dec1b38a98f
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -53,12 +53,12 @@
 SYM_FUNC_START(copy_user_generic_unrolled)
 	ASM_STAC
 	cmpl $8,%edx
-	jb 20f		/* less then 8 bytes, go to byte copy loop */
+	jb .Lcopy_user_short_string_bytes
 	ALIGN_DESTINATION
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz .L_copy_short_string
+	jz copy_user_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -79,37 +79,11 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-.L_copy_short_string:
-	movl %edx,%ecx
-	andl $7,%edx
-	shrl $3,%ecx
-	jz 20f
-18:	movq (%rsi),%r8
-19:	movq %r8,(%rdi)
-	leaq 8(%rsi),%rsi
-	leaq 8(%rdi),%rdi
-	decl %ecx
-	jnz 18b
-20:	andl %edx,%edx
-	jz 23f
-	movl %edx,%ecx
-21:	movb (%rsi),%al
-22:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 21b
-23:	xor %eax,%eax
-	ASM_CLAC
-	RET
+	jmp copy_user_short_string

 30:	shll $6,%ecx
 	addl %ecx,%edx
-	jmp 60f
-40:	leal (%rdx,%rcx,8),%edx
-	jmp 60f
-50:	movl %ecx,%edx
-60:	jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
+	jmp .Lcopy_user_handle_tail

 	_ASM_EXTABLE_CPY(1b, 30b)
 	_ASM_EXTABLE_CPY(2b, 30b)
@@ -127,10 +101,6 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	_ASM_EXTABLE_CPY(14b, 30b)
 	_ASM_EXTABLE_CPY(15b, 30b)
 	_ASM_EXTABLE_CPY(16b, 30b)
-	_ASM_EXTABLE_CPY(18b, 40b)
-	_ASM_EXTABLE_CPY(19b, 40b)
-	_ASM_EXTABLE_CPY(21b, 50b)
-	_ASM_EXTABLE_CPY(22b, 50b)
 SYM_FUNC_END(copy_user_generic_unrolled)
 EXPORT_SYMBOL(copy_user_generic_unrolled)

@@ -191,7 +161,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
 SYM_FUNC_START(copy_user_enhanced_fast_string)
 	ASM_STAC
 	/* CPUs without FSRM should avoid rep movsb for short copies */
-	ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
+	ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
 	movl %edx,%ecx
 1:	rep movsb
 	xorl %eax,%eax
@@ -243,6 +213,53 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
 SYM_CODE_END(.Lcopy_user_handle_tail)

+/*
+ * Finish memcpy of less than 64 bytes. #AC should already be set.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count (< 64)
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+SYM_CODE_START_LOCAL(copy_user_short_string)
+	movl %edx,%ecx
+	andl $7,%edx
+	shrl $3,%ecx
+	jz .Lcopy_user_short_string_bytes
+18:	movq (%rsi),%r8
+19:	movq %r8,(%rdi)
+	leaq 8(%rsi),%rsi
+	leaq 8(%rdi),%rdi
+	decl %ecx
+	jnz 18b
+.Lcopy_user_short_string_bytes:
+	andl %edx,%edx
+	jz 23f
+	movl %edx,%ecx
+21:	movb (%rsi),%al
+22:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 21b
+23:	xor %eax,%eax
+	ASM_CLAC
+	RET
+
+40:	leal (%rdx,%rcx,8),%edx
+	jmp 60f
+50:	movl %ecx,%edx			/* ecx is zerorest also */
+60:	jmp .Lcopy_user_handle_tail
+
+	_ASM_EXTABLE_CPY(18b, 40b)
+	_ASM_EXTABLE_CPY(19b, 40b)
+	_ASM_EXTABLE_CPY(21b, 50b)
+	_ASM_EXTABLE_CPY(22b, 50b)
+SYM_CODE_END(copy_user_short_string)
+
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination out of cache for more performance.
--
cgit v1.2.3-58-ga151
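
For context, a minimal sketch of the linkage pattern this patch switches to. It is illustrative only and not taken from the patch: the symbols caller_a, caller_b and shared_tail are hypothetical, while the SYM_*() annotations are the kernel's macros from <linux/linkage.h>. Functions marked SYM_FUNC_START/SYM_FUNC_END never jump into the middle of another function; the shared tail instead lives in a SYM_CODE_START_LOCAL/SYM_CODE_END snippet that is only ever entered via jmp.

/*
 * Illustrative sketch only; symbol names below are hypothetical.
 */
SYM_FUNC_START(caller_a)
	/* ... fast path ... */
	jmp	shared_tail		/* jump to a code snippet, not into another function */
SYM_FUNC_END(caller_a)

SYM_FUNC_START(caller_b)
	/* ... different fast path ... */
	jmp	shared_tail
SYM_FUNC_END(caller_b)

/* Non-function code snippet: a legitimate jump target for several functions. */
SYM_CODE_START_LOCAL(shared_tail)
	/* ... common slow path ... */
	RET
SYM_CODE_END(shared_tail)

This is the same shape the patch gives copy_user_short_string: the former .L_copy_short_string label inside copy_user_generic_unrolled becomes a standalone local code snippet that copy_user_generic_unrolled and copy_user_enhanced_fast_string reach by jmp, which keeps the unwinder's view of function boundaries sane.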