1/* Multiple versions of memmove
2 All versions must be listed in ifunc-impl-list.c.
3 Copyright (C) 2016 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, see
18 <http://www.gnu.org/licenses/>. */
19
20#include <sysdep.h>
21#include <init-arch.h>
22
23/* Define multiple versions only for the definition in lib and for
24 DSO. */
#if IS_IN (libc)
	.text
/* IFUNC resolver for memmove: returns in %rax the address of the
   implementation best suited to the running CPU.  The symbol is typed
   @gnu_indirect_function, so the dynamic linker invokes this resolver
   once at relocation time and binds memmove to the returned pointer.

   LOAD_RTLD_GLOBAL_RO_RDX loads (per its name) the read-only rtld
   globals into %rdx so the HAS_CPU_FEATURE / HAS_ARCH_FEATURE macros
   can test feature bits; each macro sets ZF for the conditional jump
   that immediately follows it — NOTE(review): the macro/flag contract
   comes from <init-arch.h>, not visible here.

   Dispatch order (each "lea" preloads a candidate; a later test either
   keeps it or overwrites it): explicit ERMS preference first, then the
   AVX512 variants, then AVX-unaligned, then SSE2-unaligned, with SSSE3
   as the legacy fallback.  */
ENTRY(__libc_memmove)
	.type __libc_memmove, @gnu_indirect_function
	LOAD_RTLD_GLOBAL_RO_RDX
	lea	__memmove_erms(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_ERMS)
	jnz	2f			/* Tuning says rep movsb wins: done.  */
	HAS_ARCH_FEATURE (Prefer_No_AVX512)
	jnz	1f			/* AVX512 dispreferred: go to AVX tier.  */
	HAS_ARCH_FEATURE (AVX512F_Usable)
	jz	1f			/* No usable AVX512F: go to AVX tier.  */
	lea	__memmove_avx512_no_vzeroupper(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
	jnz	2f			/* Variant that avoids vzeroupper.  */
	lea	__memmove_avx512_unaligned_erms(%rip), %RAX_LP
	HAS_CPU_FEATURE (ERMS)
	jnz	2f
	lea	__memmove_avx512_unaligned(%rip), %RAX_LP
	ret
	/* AVX tier: candidate is AVX-unaligned unless fast unaligned AVX
	   loads are not available.  */
1:	lea	__memmove_avx_unaligned(%rip), %RAX_LP
	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
	jz	L(Fast_Unaligned_Load)	/* Fall back to SSE2 tier.  */
	HAS_CPU_FEATURE (ERMS)
	jz	2f			/* Keep plain AVX-unaligned.  */
	lea	__memmove_avx_unaligned_erms(%rip), %RAX_LP
	ret
	/* SSE2 tier: prefer SSE2-unaligned when unaligned copies are
	   fast; otherwise consider SSSE3 variants.  */
L(Fast_Unaligned_Load):
	lea	__memmove_sse2_unaligned(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
	jz	L(SSSE3)
	HAS_CPU_FEATURE (ERMS)
	jz	2f			/* Keep plain SSE2-unaligned.  */
	lea	__memmove_sse2_unaligned_erms(%rip), %RAX_LP
	ret
	/* SSSE3 tier: %rax still holds __memmove_sse2_unaligned here, so
	   a missing SSSE3 bit keeps that as the final answer.  */
L(SSSE3):
	HAS_CPU_FEATURE (SSSE3)
	jz	2f
	lea	__memmove_ssse3_back(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Fast_Copy_Backward)
	jnz	2f			/* Backward-copy tuned variant.  */
	lea	__memmove_ssse3(%rip), %RAX_LP
2:	ret
END(__libc_memmove)
#endif
70
#if IS_IN (libc)
/* Rename the generic implementation pulled in below (../memmove.S) so
   its symbols become the __*_sse2_unaligned family used as the
   baseline variant by the resolver above.  */
# define MEMMOVE_SYMBOL(p,s)	p##_sse2_##s

# ifdef SHARED
/* Bind libc-internal hidden references for memmove/memcpy/mempcpy
   directly to the SSE2 variants, bypassing the IFUNC indirection for
   calls from within libc itself.  */
libc_hidden_ver (__memmove_sse2_unaligned, memmove)
libc_hidden_ver (__memcpy_sse2_unaligned, memcpy)
libc_hidden_ver (__mempcpy_sse2_unaligned, mempcpy)
libc_hidden_ver (__mempcpy_sse2_unaligned, __mempcpy)

# undef libc_hidden_builtin_def
/* It doesn't make sense to send libc-internal memmove calls through a PLT.
   The speedup we get from using SSE2 instructions is likely eaten away
   by the indirect call in the PLT.  */
# define libc_hidden_builtin_def
# endif
/* Export the IFUNC entry point above under the public name.  */
strong_alias (__libc_memmove, memmove)
#endif
88
/* Outside the shared libc build there is no IFUNC machinery, so
   provide the plain mempcpy alias directly.  */
#if !defined SHARED || !IS_IN (libc)
weak_alias (__mempcpy, mempcpy)
#endif

/* Pull in the generic implementation; MEMMOVE_SYMBOL (defined above
   when IS_IN (libc)) renames its symbols to the _sse2_ variants.  */
#include "../memmove.S"

#if defined SHARED && IS_IN (libc)
# include <shlib-compat.h>
# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
/* Use __memmove_sse2_unaligned to support overlapping addresses.  */
compat_symbol (libc, __memmove_sse2_unaligned, memcpy, GLIBC_2_2_5);
# endif
#endif
102