/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2017 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <cpuid.h>
#include <cpu-features.h>
#include <libc-internal.h>

static void
get_common_indeces (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
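      /* CPUID leaf 1 puts the version information in EAX: stepping
         in bits 0-3, model in bits 4-7, family in bits 8-11, the
         extended model in bits 16-19 and the extended family in bits
         20-27.  Shifting the extended model right by only 12 keeps
         it pre-shifted left by 4, so it can be added directly to the
         base model to form the conventional display model.  */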
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
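      /* Worked example (a Skylake-class signature, for illustration):
         EAX = 0x000506e3 gives stepping 3, model 0xe, family 0x6 and
         extended_model 0x50.  Family 0x06 parts add the extended
         model in init_cpu_features below, yielding display model
         0x5e.  */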
    }

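  /* CPUID leaf 7 (subleaf 0) enumerates the structured extended
     feature flags; the AVX2, AVX512F and AVX512DQ CPU bits tested
     below are reported here.  */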
  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
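      /* XGETBV with ECX = 0 reads XCR0 into EDX:EAX.  The relevant
         XCR0 bits are: 1 (XMM), 2 (YMM upper halves), 5 (opmask),
         6 (upper halves of ZMM0-ZMM15) and 7 (ZMM16-ZMM31).  A set
         bit means the OS saves and restores that state on context
         switch, so the corresponding registers are safe to use.  */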
      /* Are the XMM and YMM states usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state))
          == (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              cpu_features->feature[index_arch_AVX_Usable]
                |= bit_arch_AVX_Usable;
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                cpu_features->feature[index_arch_AVX2_Usable]
                  |= bit_arch_AVX2_Usable;
              /* Determine if FMA is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, FMA))
                cpu_features->feature[index_arch_FMA_Usable]
                  |= bit_arch_FMA_Usable;
            }

          /* Check if the Opmask state, the upper 256 bits of
             ZMM0-ZMM15 and the ZMM16-ZMM31 state are all enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state))
              == (bit_Opmask_state | bit_ZMM0_15_state
                  | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
        }

      /* For _dl_runtime_resolve, set xsave_state_size to the xsave
         area size plus the integer register save size, aligned to
         64 bytes.  */
      if (cpu_features->max_cpuid >= 0xd)
        {
          unsigned int eax, ebx, ecx, edx;

          __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
          if (ebx != 0)
            {
              cpu_features->xsave_state_size
                = ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64);
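              /* CPUID.(EAX=0xd,ECX=0):EBX is the XSAVE area size for
                 the currently enabled XCR0 features.  Illustrative
                 numbers only: with x87, SSE and AVX enabled, EBX is
                 typically 0x340 (832) bytes; assuming a
                 STATE_SAVE_OFFSET of 64, this stores
                 ALIGN_UP (832 + 64, 64) = 896.  */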

              __cpuid_count (0xd, 1, eax, ebx, ecx, edx);

              /* Check if XSAVEC is available
                 (CPUID.(EAX=0xd,ECX=1):EAX bit 1).  */
              if ((eax & (1 << 1)) != 0)
                {
                  unsigned int xstate_comp_offsets[32];
                  unsigned int xstate_comp_sizes[32];
                  unsigned int i;

                  xstate_comp_offsets[0] = 0;
                  xstate_comp_offsets[1] = 160;
                  xstate_comp_offsets[2] = 576;
                  xstate_comp_sizes[0] = 160;
                  xstate_comp_sizes[1] = 256;

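                  /* In the compacted format used by XSAVEC, the
                     legacy x87 and SSE areas plus the XSAVE header
                     occupy the first 576 bytes; the enabled extended
                     components follow contiguously in component
                     order, each optionally 64-byte aligned.  Compute
                     the offset and size of every component in
                     STATE_SAVE_MASK from CPUID leaf 0xd.  */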
                  for (i = 2; i < 32; i++)
                    {
                      if ((STATE_SAVE_MASK & (1 << i)) != 0)
                        {
                          __cpuid_count (0xd, i, eax, ebx, ecx, edx);
                          xstate_comp_sizes[i] = eax;
                        }
                      else
                        {
                          /* Clear ECX so the 64-byte alignment test
                             below does not see a stale value for a
                             component that is not being saved.  */
                          ecx = 0;
                          xstate_comp_sizes[i] = 0;
                        }

                      if (i > 2)
                        {
                          xstate_comp_offsets[i]
                            = (xstate_comp_offsets[i - 1]
                               + xstate_comp_sizes[i - 1]);
                          /* CPUID.(EAX=0xd,ECX=i):ECX bit 1 set means
                             this component must be 64-byte aligned in
                             the compacted format.  */
                          if ((ecx & (1 << 1)) != 0)
                            xstate_comp_offsets[i]
                              = ALIGN_UP (xstate_comp_offsets[i], 64);
                        }
                    }

                  /* Use XSAVEC.  Components outside STATE_SAVE_MASK
                     have size 0, so the end of component 31 is the
                     total compacted size.  */
                  unsigned int size
                    = xstate_comp_offsets[31] + xstate_comp_sizes[31];
                  if (size)
                    {
                      cpu_features->xsave_state_size
                        = ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
                      cpu_features->feature[index_arch_XSAVEC_Usable]
                        |= bit_arch_XSAVEC_Usable;
                    }
                }
            }
        }
    }
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);

  /* This spells out "GenuineIntel": EBX, EDX and ECX hold "Genu",
     "ineI" and "ntel" as little-endian byte strings.  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_intel;

      get_common_indeces (cpu_features, &family, &model, &extended_model,
                          &stepping);

      if (family == 0x06)
        {
          ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;
            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */
              /* Fall through.  */

            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */
              /* Fall through.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */
              /* Fall through.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
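              /* The #if checks above guarantee that all four bits
                 live in the same feature word, so a single OR sets
                 them all.  */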
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assume this is one
                 of the Core i3/i5/i7 processors if AVX is available,
                 and fall through.  */
              if ((ecx & bit_cpu_AVX) == 0)
                break;

            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;

            case 0x3f:
              /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
              if (stepping >= 4)
                break;
              /* Fall through.  */
            case 0x3c:
            case 0x45:
            case 0x46:
              /* Disable Intel TSX on Haswell processors (except Xeon
                 E7 v3 with stepping >= 4) to avoid TSX on kernels that
                 weren't updated with the latest microcode package
                 (which disables the broken feature by default).  */
              cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx &= ~(bit_cpu_RTM);
              break;
            }
        }

      /* Unaligned loads with 256-bit AVX registers are faster on
         Intel processors with AVX2.  */
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
        cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
          |= bit_arch_AVX_Fast_Unaligned_Load;

      /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
         if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
         frequency if AVX512ER isn't available.  */
      if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
        cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
          |= bit_arch_Prefer_No_VZEROUPPER;
      else
        cpu_features->feature[index_arch_Prefer_No_AVX512]
          |= bit_arch_Prefer_No_AVX512;
    }
  /* This spells out "AuthenticAMD": EBX, EDX and ECX hold "Auth",
     "enti" and "cAMD" as little-endian byte strings.  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_amd;

      get_common_indeces (cpu_features, &family, &model, &extended_model,
                          &stepping);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

      unsigned int eax;
      /* Leaf 0x80000000 returns the highest supported extended leaf
         in EAX.  */
      __cpuid (0x80000000, eax, ebx, ecx, edx);
      if (eax >= 0x80000001)
        __cpuid (0x80000001,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);

      if (HAS_ARCH_FEATURE (AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
          /* "Excavator"  */
          if (model >= 0x60 && model <= 0x7f)
            cpu_features->feature[index_arch_Fast_Unaligned_Load]
              |= (bit_arch_Fast_Unaligned_Load
                  | bit_arch_Fast_Copy_Backward);
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indeces (cpu_features, NULL, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;
}
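
/* Consumers normally test the bits set above through the macros from
   cpu-features.h rather than reading the feature array directly,
   e.g. (illustrative only):

     if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
       ... select an AVX2 implementation ...

   HAS_ARCH_FEATURE (AVX2_Usable) is the equivalent test against the
   global feature data.  */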