author    Arnaldo Carvalho de Melo <acme@redhat.com>  2019-12-02 17:40:57 +0300
committer Arnaldo Carvalho de Melo <acme@redhat.com>  2019-12-02 17:40:57 +0300
commit    bd5c6b81dd6025bd4c6ca7800a580b217d9899b9 (patch)
tree      fd3c99805c273eb940b8dd922ea15788f5cfd282 /tools/arch/x86/lib/memset_64.S
parent    77b91c1a525d84cb560a4baef6f5f548b5c23f80 (diff)
perf bench: Update the copies of x86's mem{cpy,set}_64.S
And update linux/linkage.h, which requires in turn that we make these files switch from ENTRY()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END():

  tools/perf/arch/arm64/tests/regs_load.S
  tools/perf/arch/arm/tests/regs_load.S
  tools/perf/arch/powerpc/tests/regs_load.S
  tools/perf/arch/x86/tests/regs_load.S

We also need to switch SYM_FUNC_START_LOCAL() to SYM_FUNC_START() for the functions used directly by 'perf bench', and update tools/perf/check_headers.sh to ignore those changes when checking if the kernel original files drifted from the copies we carry.

This is to get the changes from:

  6dcc5627f6ae ("x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*")
  ef1e03152cb0 ("x86/asm: Make some functions local")
  e9b9d020c487 ("x86/asm: Annotate aliases")

And address these tools/perf build warnings:

  Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'
  diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S
  Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S'
  diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-tay3l8x8k11p7y3qcpqh9qh5@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
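As a note on what the rename amounts to: the old and new annotations end up emitting much the same assembler directives, the new names just spell out the global/local/alias distinction. The sketch below is a simplification assuming x86, not the actual contents of (tools/)include/linux/linkage.h, which builds these from SYM_START()/SYM_END() helpers and ASM_NL:

  /* Old style: global, aligned entry point ... */
  #define ENTRY(name)          \
          .globl name;         \
          .p2align 4, 0x90;    \
          name:

  /* ... closed by giving the symbol a function type and a size. */
  #define ENDPROC(name)              \
          .type name, @function;     \
          .size name, .-name

  /*
   * New style, roughly the same output with the visibility made explicit:
   *
   *   SYM_FUNC_START(name)        - like ENTRY(): .globl + alignment + label
   *   SYM_FUNC_START_LOCAL(name)  - same, but without .globl (file-local)
   *   SYM_FUNC_START_ALIAS(name)  - extra global label at the same address
   *   SYM_FUNC_END(name)          - like ENDPROC(): .type + .size
   *   SYM_FUNC_END_ALIAS(name)    - .type + .size for the alias name
   */

Because the perf copy keeps memset_erms()/memset_orig() global while the kernel now marks them local, check_headers.sh only has to tolerate that one annotation difference when it diffs the two files; the rest of the copy stays identical to the kernel original.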
Diffstat (limited to 'tools/arch/x86/lib/memset_64.S')
-rw-r--r--  tools/arch/x86/lib/memset_64.S  16
1 file changed, 8 insertions, 8 deletions
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
index f8f3dc0a6690..fd5d25a474b7 100644
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -18,8 +18,8 @@
*
* rax original destination
*/
-ENTRY(memset)
-ENTRY(__memset)
+SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
* to use it when possible. If not available, use fast string instructions.
@@ -42,8 +42,8 @@ ENTRY(__memset)
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_END_ALIAS(memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
@@ -56,16 +56,16 @@ ENDPROC(__memset)
*
* rax original destination
*/
-ENTRY(memset_erms)
+SYM_FUNC_START(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
-ENTRY(memset_orig)
+SYM_FUNC_START(memset_orig)
movq %rdi,%r10
/* expand byte value */
@@ -136,4 +136,4 @@ ENTRY(memset_orig)
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
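The reason the perf copy uses SYM_FUNC_START() rather than SYM_FUNC_START_LOCAL() for memset_erms and memset_orig is that 'perf bench mem memset' calls these variants straight from C, so the symbols must stay global. A minimal sketch of such a caller follows; the prototypes are illustrative (the real declarations live in the perf bench sources) and the program only links if memset_64.S is assembled in alongside:

  #include <stddef.h>

  /* Illustrative prototypes: both variants follow the standard memset ABI
   * (rdi = dest, esi = byte value, rdx = length, rax = dest on return). */
  void *memset_erms(void *dest, int c, size_t len);
  void *memset_orig(void *dest, int c, size_t len);

  int main(void)
  {
          static char buf[4096];

          /* Exercise both implementations on the same buffer. */
          memset_erms(buf, 0xaa, sizeof(buf));
          memset_orig(buf, 0x55, sizeof(buf));

          /* If these symbols had stayed local to memset_64.S, this program
           * would fail to link against the perf copy. */
          return buf[0] == 0x55 ? 0 : 1;
  }

That local-to-global switch is exactly the divergence the check_headers.sh update is taught to accept.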