author     Jiri Slaby <jslaby@suse.cz>              2019-10-11 13:51:00 +0200
committer  Borislav Petkov <bp@suse.de>             2019-10-18 11:34:39 +0200
commit     ef1e03152cb027d5925646d4d1772ced7595292f (patch)
tree       e58409650b48b30a7813a549bce169aa50a23066 /arch/x86/lib
parent     b4edca150106a68d05eaf823d665a355ff19e28b (diff)
x86/asm: Make some functions local
There are a couple of assembly functions which are invoked only locally
in the file in which they are defined. In C, they would be marked
"static". In assembly, annotate them using SYM_{FUNC,CODE}_START_LOCAL
(and switch their ENDPROC to SYM_{FUNC,CODE}_END too). Whether FUNC or
CODE is used depends on whether ENDPROC or END was used for a
particular function before.
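For illustration, the conversion pattern looks like this (a minimal
sketch with a hypothetical symbol name, not taken verbatim from the
diff below):

    -ENTRY(local_helper)
    +SYM_FUNC_START_LOCAL(local_helper)
             ...
             ret
    -ENDPROC(local_helper)
    +SYM_FUNC_END(local_helper)

The _LOCAL variant does not emit .globl for the symbol, so it stays
local to the object file, much like "static" hides a C function from
other compilation units.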
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-efi <linux-efi@vger.kernel.org>
Cc: linux-efi@vger.kernel.org
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: platform-driver-x86@vger.kernel.org
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/20191011115108.12392-21-jslaby@suse.cz
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--   arch/x86/lib/copy_page_64.S |  4
-rw-r--r--   arch/x86/lib/memcpy_64.S    | 12
-rw-r--r--   arch/x86/lib/memset_64.S    |  8
3 files changed, 12 insertions, 12 deletions
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index fd2d09afa097..f505870bd93b 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -21,7 +21,7 @@ ENTRY(copy_page)
 ENDPROC(copy_page)
 EXPORT_SYMBOL(copy_page)
 
-ENTRY(copy_page_regs)
+SYM_FUNC_START_LOCAL(copy_page_regs)
 	subq	$2*8,	%rsp
 	movq	%rbx,	(%rsp)
 	movq	%r12,	1*8(%rsp)
@@ -86,4 +86,4 @@ ENTRY(copy_page_regs)
 	movq	1*8(%rsp), %r12
 	addq	$2*8, %rsp
 	ret
-ENDPROC(copy_page_regs)
+SYM_FUNC_END(copy_page_regs)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 57a64266ba69..3265b21e86c0 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -29,7 +29,7 @@
  * rax original destination
  */
 SYM_FUNC_START_ALIAS(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
@@ -41,7 +41,7 @@ ENTRY(memcpy)
 	movl %edx, %ecx
 	rep movsb
 	ret
-ENDPROC(memcpy)
+SYM_FUNC_END(memcpy)
 SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
@@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy)
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
 	ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)
 
-ENTRY(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
 
 .Lend:
 	retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)
 
 #ifndef CONFIG_UML
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 927ac44d34aa..564abf9ecedb 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -59,16 +59,16 @@ EXPORT_SYMBOL(__memset)
  *
  * rax   original destination
  */
-ENTRY(memset_erms)
+SYM_FUNC_START_LOCAL(memset_erms)
 	movq %rdi,%r9
 	movb %sil,%al
 	movq %rdx,%rcx
 	rep stosb
 	movq %r9,%rax
 	ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
 
-ENTRY(memset_orig)
+SYM_FUNC_START_LOCAL(memset_orig)
 	movq %rdi,%r10
 
 	/* expand byte value  */
@@ -139,4 +139,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
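A quick way to sanity-check the result (not part of this patch): after
a build, running nm on the touched objects, e.g. nm
arch/x86/lib/memset_64.o, should now list converted symbols such as
memset_erms with a lowercase 't' (local text symbol) rather than the
uppercase 'T' they had while still declared global.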