target-i386: Move features logic that requires CPUState to realize time
[qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level (1-based) is reported in EAX bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)

/* Sentinel associativity value meaning "fully associative" */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a way count to the 4-bit encoding; unknown counts map to 0.
 */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
          a == 2 ? 0x2 : \
          a == 4 ? 0x4 : \
          a == 8 ? 0x6 : \
          a == 16 ? 0x8 : \
          a == 32 ? 0xA : \
          a == 48 ? 0xB : \
          a == 64 ? 0xC : \
          a == 96 ? 0xD : \
          a == 128 ? 0xE : \
          a == ASSOC_FULL ? 0xF : \
          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* No L3 cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
175
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added
 * (as "name1|name2" entries, any alternative matches).
 *
 * Each table maps bit N of the corresponding CPUID register to the
 * command-line feature name; NULL marks bits with no name known to QEMU.
 */
/* CPUID[1].EDX feature names: */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names: */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX feature names: */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[C000_0001].EDX (VIA/Centaur) feature names: */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* KVM paravirtual feature names (CPUID[KVM_CPUID_FEATURES].EAX).
 * "kvmclock" appears on bits 0 and 3 on purpose: both KVM clocksource
 * feature bits share one user-visible name.
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* SVM feature names (CPUID[8000_000A].EDX): */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].EBX feature names: */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].ECX feature names: */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_0007].EDX (APM) feature names: */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE) feature names: */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[6].EAX feature names: */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
309
/* Convenience feature masks for the built-in CPU models, plus the
 * TCG_* masks, which list the subset of each feature word that the
 * TCG emulator can actually provide.
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
372
/* Per-feature-word metadata: name table, the CPUID leaf that reports
 * the word, and QEMU's support/migratability masks for it.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;    /* bit -> name table; NULL entries unknown */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
382
/* Master table describing every feature word: where its bits come from
 * (CPUID leaf/register), their user-visible names, which of them TCG can
 * emulate, and which ones block migration.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        /* invtsc makes the TSC guest-visible-stable; can't be migrated */
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
453
/* Mapping from a 32-bit register index (R_* constant) to its name and
 * the corresponding QAPI enum value.
 */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Expands to a designated initializer keyed by the R_* constant */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
474
/* XSAVE extended state components, indexed by XSTATE_*_BIT: the CPUID
 * feature that enables each component, plus its offset and size inside
 * the X86XSaveArea layout.
 */
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
505
506 const char *get_register_name_32(unsigned int reg)
507 {
508 if (reg >= CPU_NB_REGS32) {
509 return NULL;
510 }
511 return x86_reg_info_32[reg].name;
512 }
513
514 /*
515 * Returns the set of feature flags that are supported and migratable by
516 * QEMU, for a given FeatureWord.
517 */
518 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519 {
520 FeatureWordInfo *wi = &feature_word_info[w];
521 uint32_t r = 0;
522 int i;
523
524 for (i = 0; i < 32; i++) {
525 uint32_t f = 1U << i;
526 /* If the feature name is unknown, it is not supported by QEMU yet */
527 if (!wi->feat_names[i]) {
528 continue;
529 }
530 /* Skip features known to QEMU, but explicitly marked as unmigratable */
531 if (wi->unmigratable_flags & f) {
532 continue;
533 }
534 r |= f;
535 }
536 return r;
537 }
538
/* Execute the CPUID instruction on the host with the given @function
 * (EAX) and @count (ECX) inputs, storing the resulting registers into
 * the non-NULL output pointers.  Aborts on non-x86 hosts.
 * NOTE(review): the i386 variant saves/restores all registers with
 * pusha/popa so EBX (PIC register) is preserved.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
572
/* True for any non-NUL character outside the graphic ASCII range
 * (controls, space, DEL and high-bit bytes) — i.e. separators when
 * parsing feature strings.  Despite the name, this is broader than
 * just whitespace. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
574
/* General substring compare of *[s1..e1) and *[s2..e2).  sx is the start
 * of a substring; ex, if non-NULL, points one past its end, otherwise the
 * substring is bounded by its terminating NUL.
 * Returns the lexical ordering of *s1 vs *s2 (0 when equal).
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        /* Stop at a NUL or at the first differing character */
        if (*s1 == '\0' || *s2 == '\0' || *s1 != *s2) {
            return *s1 - *s2;
        }
        s1++;
        s2++;
        /* Honor the explicit end pointers, when provided */
        if (s1 == e1) {
            return s2 == e2 ? 0 : *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
}
595
/* Compare *[s..e) against *altstr.  *altstr may be a simple string or a
 * list of '|'-delimited (possibly empty) alternatives searched left to
 * right.  Returns 0 when some alternative matches, non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;

    for (;;) {
        const char *end = start;

        /* Find the end of the current alternative */
        while (*end != '\0' && *end != '|') {
            end++;
        }
        if (start == end) {
            /* An empty alternative matches only an empty flag name */
            if (*s == '\0') {
                return 0;
            }
        } else if (sstrcmp(s, e, start, end) == 0) {
            return 0;
        }
        if (*end == '\0') {
            return 1;       /* no more alternatives to try */
        }
        start = end + 1;    /* skip the '|' */
    }
}
616
/* Search @featureset (a 32-entry bit->name table) for flag *[s..e); for
 * every entry that matches, set the corresponding bit in *pval.
 * Returns true iff at least one bit was set.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool found = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *names = featureset[bit];

        if (names != NULL && altcmp(s, e, names) == 0) {
            *pval |= 1U << bit;
            found = true;
        }
    }
    return found;
}
635
/* Set the bit(s) for feature name @flagname in the matching entry of
 * @words.  Each feature word's name table is searched in order; the scan
 * stops at the first word that contains the name.  When no word knows
 * the name, an error is reported through @errp.
 */
static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /* feat_names may be NULL for words with no user-visible names */
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
652
/* CPU class name definitions: */

/* QOM type name for model "foo" is "foo-x86_64-cpu" (etc.) */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
665
666 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667 {
668 ObjectClass *oc;
669 char *typename;
670
671 if (cpu_model == NULL) {
672 return NULL;
673 }
674
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
679 }
680
/* Static description of one built-in CPU model (builtin_x86_defs entry) */
struct X86CPUDefinition {
    const char *name;       /* model name as given on the command line */
    uint32_t level;         /* highest basic CPUID leaf */
    uint32_t xlevel;        /* highest extended CPUID leaf (0x8000xxxx) */
    uint32_t xlevel2;       /* highest Centaur CPUID leaf (0xC000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* default feature bits per feature word */
    char model_id[48];          /* CPUID brand string */
};
694
695 static X86CPUDefinition builtin_x86_defs[] = {
696 {
697 .name = "qemu64",
698 .level = 0xd,
699 .vendor = CPUID_VENDOR_AMD,
700 .family = 6,
701 .model = 6,
702 .stepping = 3,
703 .features[FEAT_1_EDX] =
704 PPRO_FEATURES |
705 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 CPUID_PSE36,
707 .features[FEAT_1_ECX] =
708 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
709 .features[FEAT_8000_0001_EDX] =
710 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
711 .features[FEAT_8000_0001_ECX] =
712 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
713 .xlevel = 0x8000000A,
714 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
715 },
716 {
717 .name = "phenom",
718 .level = 5,
719 .vendor = CPUID_VENDOR_AMD,
720 .family = 16,
721 .model = 2,
722 .stepping = 3,
723 /* Missing: CPUID_HT */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 CPUID_EXT_POPCNT,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
733 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
734 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
735 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_CR8LEG,
737 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
738 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
739 .features[FEAT_8000_0001_ECX] =
740 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
741 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
742 /* Missing: CPUID_SVM_LBRV */
743 .features[FEAT_SVM] =
744 CPUID_SVM_NPT,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
747 },
748 {
749 .name = "core2duo",
750 .level = 10,
751 .vendor = CPUID_VENDOR_INTEL,
752 .family = 6,
753 .model = 15,
754 .stepping = 11,
755 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
756 .features[FEAT_1_EDX] =
757 PPRO_FEATURES |
758 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
759 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
760 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
761 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
762 .features[FEAT_1_ECX] =
763 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 CPUID_EXT_CX16,
765 .features[FEAT_8000_0001_EDX] =
766 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
767 .features[FEAT_8000_0001_ECX] =
768 CPUID_EXT3_LAHF_LM,
769 .xlevel = 0x80000008,
770 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
771 },
772 {
773 .name = "kvm64",
774 .level = 0xd,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 15,
777 .model = 6,
778 .stepping = 1,
779 /* Missing: CPUID_HT */
780 .features[FEAT_1_EDX] =
781 PPRO_FEATURES | CPUID_VME |
782 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 CPUID_PSE36,
784 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
790 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
791 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
792 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
793 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
794 .features[FEAT_8000_0001_ECX] =
795 0,
796 .xlevel = 0x80000008,
797 .model_id = "Common KVM processor"
798 },
799 {
800 .name = "qemu32",
801 .level = 4,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 6,
805 .stepping = 3,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3,
810 .xlevel = 0x80000004,
811 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
812 },
813 {
814 .name = "kvm32",
815 .level = 5,
816 .vendor = CPUID_VENDOR_INTEL,
817 .family = 15,
818 .model = 6,
819 .stepping = 1,
820 .features[FEAT_1_EDX] =
821 PPRO_FEATURES | CPUID_VME |
822 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
823 .features[FEAT_1_ECX] =
824 CPUID_EXT_SSE3,
825 .features[FEAT_8000_0001_ECX] =
826 0,
827 .xlevel = 0x80000008,
828 .model_id = "Common 32-bit KVM processor"
829 },
830 {
831 .name = "coreduo",
832 .level = 10,
833 .vendor = CPUID_VENDOR_INTEL,
834 .family = 6,
835 .model = 14,
836 .stepping = 8,
837 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
838 .features[FEAT_1_EDX] =
839 PPRO_FEATURES | CPUID_VME |
840 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
841 CPUID_SS,
842 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
843 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
844 .features[FEAT_1_ECX] =
845 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
846 .features[FEAT_8000_0001_EDX] =
847 CPUID_EXT2_NX,
848 .xlevel = 0x80000008,
849 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
850 },
851 {
852 .name = "486",
853 .level = 1,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 4,
856 .model = 8,
857 .stepping = 0,
858 .features[FEAT_1_EDX] =
859 I486_FEATURES,
860 .xlevel = 0,
861 },
862 {
863 .name = "pentium",
864 .level = 1,
865 .vendor = CPUID_VENDOR_INTEL,
866 .family = 5,
867 .model = 4,
868 .stepping = 3,
869 .features[FEAT_1_EDX] =
870 PENTIUM_FEATURES,
871 .xlevel = 0,
872 },
873 {
874 .name = "pentium2",
875 .level = 2,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 6,
878 .model = 5,
879 .stepping = 2,
880 .features[FEAT_1_EDX] =
881 PENTIUM2_FEATURES,
882 .xlevel = 0,
883 },
884 {
885 .name = "pentium3",
886 .level = 3,
887 .vendor = CPUID_VENDOR_INTEL,
888 .family = 6,
889 .model = 7,
890 .stepping = 3,
891 .features[FEAT_1_EDX] =
892 PENTIUM3_FEATURES,
893 .xlevel = 0,
894 },
895 {
896 .name = "athlon",
897 .level = 2,
898 .vendor = CPUID_VENDOR_AMD,
899 .family = 6,
900 .model = 2,
901 .stepping = 3,
902 .features[FEAT_1_EDX] =
903 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
904 CPUID_MCA,
905 .features[FEAT_8000_0001_EDX] =
906 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
907 .xlevel = 0x80000008,
908 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
909 },
910 {
911 .name = "n270",
912 .level = 10,
913 .vendor = CPUID_VENDOR_INTEL,
914 .family = 6,
915 .model = 28,
916 .stepping = 2,
917 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
918 .features[FEAT_1_EDX] =
919 PPRO_FEATURES |
920 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
921 CPUID_ACPI | CPUID_SS,
922 /* Some CPUs got no CPUID_SEP */
923 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
924 * CPUID_EXT_XTPR */
925 .features[FEAT_1_ECX] =
926 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
927 CPUID_EXT_MOVBE,
928 .features[FEAT_8000_0001_EDX] =
929 CPUID_EXT2_NX,
930 .features[FEAT_8000_0001_ECX] =
931 CPUID_EXT3_LAHF_LM,
932 .xlevel = 0x80000008,
933 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
934 },
935 {
936 .name = "Conroe",
937 .level = 10,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 15,
941 .stepping = 3,
942 .features[FEAT_1_EDX] =
943 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
944 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
945 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
946 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
947 CPUID_DE | CPUID_FP87,
948 .features[FEAT_1_ECX] =
949 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
950 .features[FEAT_8000_0001_EDX] =
951 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
952 .features[FEAT_8000_0001_ECX] =
953 CPUID_EXT3_LAHF_LM,
954 .xlevel = 0x80000008,
955 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
956 },
957 {
958 .name = "Penryn",
959 .level = 10,
960 .vendor = CPUID_VENDOR_INTEL,
961 .family = 6,
962 .model = 23,
963 .stepping = 3,
964 .features[FEAT_1_EDX] =
965 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
966 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
967 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
968 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
969 CPUID_DE | CPUID_FP87,
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
972 CPUID_EXT_SSE3,
973 .features[FEAT_8000_0001_EDX] =
974 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
975 .features[FEAT_8000_0001_ECX] =
976 CPUID_EXT3_LAHF_LM,
977 .xlevel = 0x80000008,
978 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
979 },
980 {
981 .name = "Nehalem",
982 .level = 11,
983 .vendor = CPUID_VENDOR_INTEL,
984 .family = 6,
985 .model = 26,
986 .stepping = 3,
987 .features[FEAT_1_EDX] =
988 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
989 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
990 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
991 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
992 CPUID_DE | CPUID_FP87,
993 .features[FEAT_1_ECX] =
994 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
995 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
996 .features[FEAT_8000_0001_EDX] =
997 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
998 .features[FEAT_8000_0001_ECX] =
999 CPUID_EXT3_LAHF_LM,
1000 .xlevel = 0x80000008,
1001 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1002 },
1003 {
1004 .name = "Westmere",
1005 .level = 11,
1006 .vendor = CPUID_VENDOR_INTEL,
1007 .family = 6,
1008 .model = 44,
1009 .stepping = 1,
1010 .features[FEAT_1_EDX] =
1011 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1012 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1013 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1014 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1015 CPUID_DE | CPUID_FP87,
1016 .features[FEAT_1_ECX] =
1017 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1018 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1019 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1020 .features[FEAT_8000_0001_EDX] =
1021 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1022 .features[FEAT_8000_0001_ECX] =
1023 CPUID_EXT3_LAHF_LM,
1024 .features[FEAT_6_EAX] =
1025 CPUID_6_EAX_ARAT,
1026 .xlevel = 0x80000008,
1027 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1028 },
1029 {
1030 .name = "SandyBridge",
1031 .level = 0xd,
1032 .vendor = CPUID_VENDOR_INTEL,
1033 .family = 6,
1034 .model = 42,
1035 .stepping = 1,
1036 .features[FEAT_1_EDX] =
1037 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1038 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1039 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1040 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1041 CPUID_DE | CPUID_FP87,
1042 .features[FEAT_1_ECX] =
1043 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1044 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1045 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1046 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1047 CPUID_EXT_SSE3,
1048 .features[FEAT_8000_0001_EDX] =
1049 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1050 CPUID_EXT2_SYSCALL,
1051 .features[FEAT_8000_0001_ECX] =
1052 CPUID_EXT3_LAHF_LM,
1053 .features[FEAT_XSAVE] =
1054 CPUID_XSAVE_XSAVEOPT,
1055 .features[FEAT_6_EAX] =
1056 CPUID_6_EAX_ARAT,
1057 .xlevel = 0x80000008,
1058 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1059 },
1060 {
1061 .name = "IvyBridge",
1062 .level = 0xd,
1063 .vendor = CPUID_VENDOR_INTEL,
1064 .family = 6,
1065 .model = 58,
1066 .stepping = 9,
1067 .features[FEAT_1_EDX] =
1068 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1069 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1070 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1071 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1072 CPUID_DE | CPUID_FP87,
1073 .features[FEAT_1_ECX] =
1074 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1075 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1076 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1077 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1078 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1079 .features[FEAT_7_0_EBX] =
1080 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1081 CPUID_7_0_EBX_ERMS,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1084 CPUID_EXT2_SYSCALL,
1085 .features[FEAT_8000_0001_ECX] =
1086 CPUID_EXT3_LAHF_LM,
1087 .features[FEAT_XSAVE] =
1088 CPUID_XSAVE_XSAVEOPT,
1089 .features[FEAT_6_EAX] =
1090 CPUID_6_EAX_ARAT,
1091 .xlevel = 0x80000008,
1092 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1093 },
1094 {
1095 .name = "Haswell-noTSX",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 60,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1110 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1111 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1112 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1113 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_7_0_EBX] =
1120 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1121 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1122 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1123 .features[FEAT_XSAVE] =
1124 CPUID_XSAVE_XSAVEOPT,
1125 .features[FEAT_6_EAX] =
1126 CPUID_6_EAX_ARAT,
1127 .xlevel = 0x80000008,
1128 .model_id = "Intel Core Processor (Haswell, no TSX)",
1129 }, {
1130 .name = "Haswell",
1131 .level = 0xd,
1132 .vendor = CPUID_VENDOR_INTEL,
1133 .family = 6,
1134 .model = 60,
1135 .stepping = 1,
1136 .features[FEAT_1_EDX] =
1137 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1138 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1139 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1140 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1141 CPUID_DE | CPUID_FP87,
1142 .features[FEAT_1_ECX] =
1143 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1144 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1145 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1146 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1147 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1148 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1149 .features[FEAT_8000_0001_EDX] =
1150 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1151 CPUID_EXT2_SYSCALL,
1152 .features[FEAT_8000_0001_ECX] =
1153 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1154 .features[FEAT_7_0_EBX] =
1155 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1156 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1157 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1158 CPUID_7_0_EBX_RTM,
1159 .features[FEAT_XSAVE] =
1160 CPUID_XSAVE_XSAVEOPT,
1161 .features[FEAT_6_EAX] =
1162 CPUID_6_EAX_ARAT,
1163 .xlevel = 0x80000008,
1164 .model_id = "Intel Core Processor (Haswell)",
1165 },
1166 {
1167 .name = "Broadwell-noTSX",
1168 .level = 0xd,
1169 .vendor = CPUID_VENDOR_INTEL,
1170 .family = 6,
1171 .model = 61,
1172 .stepping = 2,
1173 .features[FEAT_1_EDX] =
1174 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1175 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1176 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1177 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1178 CPUID_DE | CPUID_FP87,
1179 .features[FEAT_1_ECX] =
1180 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1181 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1183 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1184 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1185 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1186 .features[FEAT_8000_0001_EDX] =
1187 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1188 CPUID_EXT2_SYSCALL,
1189 .features[FEAT_8000_0001_ECX] =
1190 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1191 .features[FEAT_7_0_EBX] =
1192 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1193 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1194 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1195 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1196 CPUID_7_0_EBX_SMAP,
1197 .features[FEAT_XSAVE] =
1198 CPUID_XSAVE_XSAVEOPT,
1199 .features[FEAT_6_EAX] =
1200 CPUID_6_EAX_ARAT,
1201 .xlevel = 0x80000008,
1202 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1203 },
1204 {
1205 .name = "Broadwell",
1206 .level = 0xd,
1207 .vendor = CPUID_VENDOR_INTEL,
1208 .family = 6,
1209 .model = 61,
1210 .stepping = 2,
1211 .features[FEAT_1_EDX] =
1212 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1213 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1214 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1215 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1216 CPUID_DE | CPUID_FP87,
1217 .features[FEAT_1_ECX] =
1218 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1219 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1220 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1221 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1222 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1223 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1224 .features[FEAT_8000_0001_EDX] =
1225 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1226 CPUID_EXT2_SYSCALL,
1227 .features[FEAT_8000_0001_ECX] =
1228 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1229 .features[FEAT_7_0_EBX] =
1230 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1231 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1232 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1233 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1234 CPUID_7_0_EBX_SMAP,
1235 .features[FEAT_XSAVE] =
1236 CPUID_XSAVE_XSAVEOPT,
1237 .features[FEAT_6_EAX] =
1238 CPUID_6_EAX_ARAT,
1239 .xlevel = 0x80000008,
1240 .model_id = "Intel Core Processor (Broadwell)",
1241 },
1242 {
1243 .name = "Skylake-Client",
1244 .level = 0xd,
1245 .vendor = CPUID_VENDOR_INTEL,
1246 .family = 6,
1247 .model = 94,
1248 .stepping = 3,
1249 .features[FEAT_1_EDX] =
1250 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1251 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1252 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1253 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1254 CPUID_DE | CPUID_FP87,
1255 .features[FEAT_1_ECX] =
1256 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1257 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1258 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1259 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1260 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1261 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1262 .features[FEAT_8000_0001_EDX] =
1263 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1264 CPUID_EXT2_SYSCALL,
1265 .features[FEAT_8000_0001_ECX] =
1266 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1267 .features[FEAT_7_0_EBX] =
1268 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1269 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1270 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1271 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1272 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1273 /* Missing: XSAVES (not supported by some Linux versions,
1274 * including v4.1 to v4.6).
1275 * KVM doesn't yet expose any XSAVES state save component,
1276 * and the only one defined in Skylake (processor tracing)
1277 * probably will block migration anyway.
1278 */
1279 .features[FEAT_XSAVE] =
1280 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1281 CPUID_XSAVE_XGETBV1,
1282 .features[FEAT_6_EAX] =
1283 CPUID_6_EAX_ARAT,
1284 .xlevel = 0x80000008,
1285 .model_id = "Intel Core Processor (Skylake)",
1286 },
1287 {
1288 .name = "Opteron_G1",
1289 .level = 5,
1290 .vendor = CPUID_VENDOR_AMD,
1291 .family = 15,
1292 .model = 6,
1293 .stepping = 1,
1294 .features[FEAT_1_EDX] =
1295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1299 CPUID_DE | CPUID_FP87,
1300 .features[FEAT_1_ECX] =
1301 CPUID_EXT_SSE3,
1302 .features[FEAT_8000_0001_EDX] =
1303 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1304 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1305 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1306 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1307 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1308 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1309 .xlevel = 0x80000008,
1310 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1311 },
1312 {
1313 .name = "Opteron_G2",
1314 .level = 5,
1315 .vendor = CPUID_VENDOR_AMD,
1316 .family = 15,
1317 .model = 6,
1318 .stepping = 1,
1319 .features[FEAT_1_EDX] =
1320 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1321 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1322 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1323 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1324 CPUID_DE | CPUID_FP87,
1325 .features[FEAT_1_ECX] =
1326 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1327 /* Missing: CPUID_EXT2_RDTSCP */
1328 .features[FEAT_8000_0001_EDX] =
1329 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1330 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1331 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1332 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1333 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1334 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1335 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1336 .features[FEAT_8000_0001_ECX] =
1337 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1338 .xlevel = 0x80000008,
1339 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1340 },
1341 {
1342 .name = "Opteron_G3",
1343 .level = 5,
1344 .vendor = CPUID_VENDOR_AMD,
1345 .family = 15,
1346 .model = 6,
1347 .stepping = 1,
1348 .features[FEAT_1_EDX] =
1349 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1350 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1351 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1352 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1353 CPUID_DE | CPUID_FP87,
1354 .features[FEAT_1_ECX] =
1355 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1356 CPUID_EXT_SSE3,
1357 /* Missing: CPUID_EXT2_RDTSCP */
1358 .features[FEAT_8000_0001_EDX] =
1359 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1360 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1361 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1362 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1363 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1364 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1365 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1366 .features[FEAT_8000_0001_ECX] =
1367 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1368 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1369 .xlevel = 0x80000008,
1370 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1371 },
1372 {
1373 .name = "Opteron_G4",
1374 .level = 0xd,
1375 .vendor = CPUID_VENDOR_AMD,
1376 .family = 21,
1377 .model = 1,
1378 .stepping = 2,
1379 .features[FEAT_1_EDX] =
1380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1384 CPUID_DE | CPUID_FP87,
1385 .features[FEAT_1_ECX] =
1386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1387 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1388 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1389 CPUID_EXT_SSE3,
1390 /* Missing: CPUID_EXT2_RDTSCP */
1391 .features[FEAT_8000_0001_EDX] =
1392 CPUID_EXT2_LM |
1393 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1394 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1395 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1396 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1397 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1398 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1399 .features[FEAT_8000_0001_ECX] =
1400 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1401 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1402 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1403 CPUID_EXT3_LAHF_LM,
1404 /* no xsaveopt! */
1405 .xlevel = 0x8000001A,
1406 .model_id = "AMD Opteron 62xx class CPU",
1407 },
1408 {
1409 .name = "Opteron_G5",
1410 .level = 0xd,
1411 .vendor = CPUID_VENDOR_AMD,
1412 .family = 21,
1413 .model = 2,
1414 .stepping = 0,
1415 .features[FEAT_1_EDX] =
1416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1420 CPUID_DE | CPUID_FP87,
1421 .features[FEAT_1_ECX] =
1422 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1423 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1425 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1426 /* Missing: CPUID_EXT2_RDTSCP */
1427 .features[FEAT_8000_0001_EDX] =
1428 CPUID_EXT2_LM |
1429 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1430 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1431 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1432 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1433 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1434 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1435 .features[FEAT_8000_0001_ECX] =
1436 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1437 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1438 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1439 CPUID_EXT3_LAHF_LM,
1440 /* no xsaveopt! */
1441 .xlevel = 0x8000001A,
1442 .model_id = "AMD Opteron 63xx class CPU",
1443 },
1444 };
1445
/* A (property name, property value) pair, used for tables of property
 * defaults such as kvm_default_props below.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1449
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The table is terminated by a { NULL, NULL } entry;
 * x86_cpu_change_kvm_default() relies on that sentinel.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1466
1467 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1468 {
1469 PropValue *pv;
1470 for (pv = kvm_default_props; pv->prop; pv++) {
1471 if (!strcmp(pv->prop, prop)) {
1472 pv->value = value;
1473 break;
1474 }
1475 }
1476
1477 /* It is valid to call this function only for properties that
1478 * are already present in the kvm_default_props table.
1479 */
1480 assert(pv->prop);
1481 }
1482
1483 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1484 bool migratable_only);
1485
1486 #ifdef CONFIG_KVM
1487
/* Fill str (at least 48 bytes) with the host CPU model-ID string,
 * read from CPUID leaves 0x80000002..0x80000004.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        /* Each leaf yields 16 bytes of the string in EAX/EBX/ECX/EDX */
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1502
1503 static X86CPUDefinition host_cpudef;
1504
/* qdev properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1510
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is returned in EBX, EDX, ECX */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: stepping [3:0], model [7:4], family [11:8],
     * extended model [19:16], extended family [27:20].  Family and
     * model are combined with their extended fields here.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1543
/* instance_init for the "host" CPU model.
 *
 * Must run after KVM is initialized (asserted below), because the
 * CPUID level limits are queried from KVM.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Highest CPUID leaves supported by KVM in the basic (0x0),
     * extended (0x80000000) and 0xC0000000 ranges.
     */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    /* PMU is enabled by default for the "host" model */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1563
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1570
1571 #endif
1572
1573 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1574 {
1575 FeatureWordInfo *f = &feature_word_info[w];
1576 int i;
1577
1578 for (i = 0; i < 32; ++i) {
1579 if ((1UL << i) & mask) {
1580 const char *reg = get_register_name_32(f->cpuid_reg);
1581 assert(reg);
1582 fprintf(stderr, "warning: %s doesn't support requested feature: "
1583 "CPUID.%02XH:%s%s%s [bit %d]\n",
1584 kvm_enabled() ? "host" : "TCG",
1585 f->cpuid_eax, reg,
1586 f->feat_names[i] ? "." : "",
1587 f->feat_names[i] ? f->feat_names[i] : "", i);
1588 }
1589 }
1590 }
1591
1592 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1593 const char *name, void *opaque,
1594 Error **errp)
1595 {
1596 X86CPU *cpu = X86_CPU(obj);
1597 CPUX86State *env = &cpu->env;
1598 int64_t value;
1599
1600 value = (env->cpuid_version >> 8) & 0xf;
1601 if (value == 0xf) {
1602 value += (env->cpuid_version >> 20) & 0xff;
1603 }
1604 visit_type_int(v, name, &value, errp);
1605 }
1606
1607 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1608 const char *name, void *opaque,
1609 Error **errp)
1610 {
1611 X86CPU *cpu = X86_CPU(obj);
1612 CPUX86State *env = &cpu->env;
1613 const int64_t min = 0;
1614 const int64_t max = 0xff + 0xf;
1615 Error *local_err = NULL;
1616 int64_t value;
1617
1618 visit_type_int(v, name, &value, &local_err);
1619 if (local_err) {
1620 error_propagate(errp, local_err);
1621 return;
1622 }
1623 if (value < min || value > max) {
1624 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1625 name ? name : "null", value, min, max);
1626 return;
1627 }
1628
1629 env->cpuid_version &= ~0xff00f00;
1630 if (value > 0x0f) {
1631 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1632 } else {
1633 env->cpuid_version |= value << 8;
1634 }
1635 }
1636
1637 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1638 const char *name, void *opaque,
1639 Error **errp)
1640 {
1641 X86CPU *cpu = X86_CPU(obj);
1642 CPUX86State *env = &cpu->env;
1643 int64_t value;
1644
1645 value = (env->cpuid_version >> 4) & 0xf;
1646 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1647 visit_type_int(v, name, &value, errp);
1648 }
1649
1650 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1651 const char *name, void *opaque,
1652 Error **errp)
1653 {
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1656 const int64_t min = 0;
1657 const int64_t max = 0xff;
1658 Error *local_err = NULL;
1659 int64_t value;
1660
1661 visit_type_int(v, name, &value, &local_err);
1662 if (local_err) {
1663 error_propagate(errp, local_err);
1664 return;
1665 }
1666 if (value < min || value > max) {
1667 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1668 name ? name : "null", value, min, max);
1669 return;
1670 }
1671
1672 env->cpuid_version &= ~0xf00f0;
1673 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1674 }
1675
1676 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1677 const char *name, void *opaque,
1678 Error **errp)
1679 {
1680 X86CPU *cpu = X86_CPU(obj);
1681 CPUX86State *env = &cpu->env;
1682 int64_t value;
1683
1684 value = env->cpuid_version & 0xf;
1685 visit_type_int(v, name, &value, errp);
1686 }
1687
1688 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1689 const char *name, void *opaque,
1690 Error **errp)
1691 {
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 const int64_t min = 0;
1695 const int64_t max = 0xf;
1696 Error *local_err = NULL;
1697 int64_t value;
1698
1699 visit_type_int(v, name, &value, &local_err);
1700 if (local_err) {
1701 error_propagate(errp, local_err);
1702 return;
1703 }
1704 if (value < min || value > max) {
1705 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1706 name ? name : "null", value, min, max);
1707 return;
1708 }
1709
1710 env->cpuid_version &= ~0xf;
1711 env->cpuid_version |= value & 0xf;
1712 }
1713
1714 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1715 {
1716 X86CPU *cpu = X86_CPU(obj);
1717 CPUX86State *env = &cpu->env;
1718 char *value;
1719
1720 value = g_malloc(CPUID_VENDOR_SZ + 1);
1721 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1722 env->cpuid_vendor3);
1723 return value;
1724 }
1725
1726 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1727 Error **errp)
1728 {
1729 X86CPU *cpu = X86_CPU(obj);
1730 CPUX86State *env = &cpu->env;
1731 int i;
1732
1733 if (strlen(value) != CPUID_VENDOR_SZ) {
1734 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1735 return;
1736 }
1737
1738 env->cpuid_vendor1 = 0;
1739 env->cpuid_vendor2 = 0;
1740 env->cpuid_vendor3 = 0;
1741 for (i = 0; i < 4; i++) {
1742 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1743 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1744 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1745 }
1746 }
1747
1748 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1749 {
1750 X86CPU *cpu = X86_CPU(obj);
1751 CPUX86State *env = &cpu->env;
1752 char *value;
1753 int i;
1754
1755 value = g_malloc(48 + 1);
1756 for (i = 0; i < 48; i++) {
1757 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1758 }
1759 value[48] = '\0';
1760 return value;
1761 }
1762
1763 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1764 Error **errp)
1765 {
1766 X86CPU *cpu = X86_CPU(obj);
1767 CPUX86State *env = &cpu->env;
1768 int c, len, i;
1769
1770 if (model_id == NULL) {
1771 model_id = "";
1772 }
1773 len = strlen(model_id);
1774 memset(env->cpuid_model, 0, 48);
1775 for (i = 0; i < 48; i++) {
1776 if (i >= len) {
1777 c = '\0';
1778 } else {
1779 c = (uint8_t)model_id[i];
1780 }
1781 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1782 }
1783 }
1784
1785 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1786 void *opaque, Error **errp)
1787 {
1788 X86CPU *cpu = X86_CPU(obj);
1789 int64_t value;
1790
1791 value = cpu->env.tsc_khz * 1000;
1792 visit_type_int(v, name, &value, errp);
1793 }
1794
1795 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1796 void *opaque, Error **errp)
1797 {
1798 X86CPU *cpu = X86_CPU(obj);
1799 const int64_t min = 0;
1800 const int64_t max = INT64_MAX;
1801 Error *local_err = NULL;
1802 int64_t value;
1803
1804 visit_type_int(v, name, &value, &local_err);
1805 if (local_err) {
1806 error_propagate(errp, local_err);
1807 return;
1808 }
1809 if (value < min || value > max) {
1810 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1811 name ? name : "null", value, min, max);
1812 return;
1813 }
1814
1815 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1816 }
1817
1818 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1819 void *opaque, Error **errp)
1820 {
1821 X86CPU *cpu = X86_CPU(obj);
1822 int64_t value = cpu->apic_id;
1823
1824 visit_type_int(v, name, &value, errp);
1825 }
1826
1827 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1828 void *opaque, Error **errp)
1829 {
1830 X86CPU *cpu = X86_CPU(obj);
1831 DeviceState *dev = DEVICE(obj);
1832 const int64_t min = 0;
1833 const int64_t max = UINT32_MAX;
1834 Error *error = NULL;
1835 int64_t value;
1836
1837 if (dev->realized) {
1838 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1839 "it was realized", name, object_get_typename(obj));
1840 return;
1841 }
1842
1843 visit_type_int(v, name, &value, &error);
1844 if (error) {
1845 error_propagate(errp, error);
1846 return;
1847 }
1848 if (value < min || value > max) {
1849 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1850 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1851 object_get_typename(obj), name, value, min, max);
1852 return;
1853 }
1854
1855 if ((value != cpu->apic_id) && cpu_exists(value)) {
1856 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1857 return;
1858 }
1859 cpu->apic_id = value;
1860 }
1861
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* opaque points at the FeatureWordArray whose contents to expose */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    /* Both arrays are stack-allocated; the visitor consumes the list
     * before this function returns, so no heap allocation is needed.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
    error_propagate(errp, err);
}
1892
1893 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1894 void *opaque, Error **errp)
1895 {
1896 X86CPU *cpu = X86_CPU(obj);
1897 int64_t value = cpu->hyperv_spinlock_attempts;
1898
1899 visit_type_int(v, name, &value, errp);
1900 }
1901
1902 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1903 void *opaque, Error **errp)
1904 {
1905 const int64_t min = 0xFFF;
1906 const int64_t max = UINT_MAX;
1907 X86CPU *cpu = X86_CPU(obj);
1908 Error *err = NULL;
1909 int64_t value;
1910
1911 visit_type_int(v, name, &value, &err);
1912 if (err) {
1913 error_propagate(errp, err);
1914 return;
1915 }
1916
1917 if (value < min || value > max) {
1918 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1919 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1920 object_get_typename(obj), name ? name : "null",
1921 value, min, max);
1922 return;
1923 }
1924 cpu->hyperv_spinlock_attempts = value;
1925 }
1926
/* qdev property type for the Hyper-V spinlock-retries value, backed by
 * x86_get_hv_spinlocks()/x86_set_hv_spinlocks() above.
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1932
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p = s;

    while ((p = strchr(p, '_')) != NULL) {
        *p = '-';
    }
}
1942
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat, even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1950
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * "+feat"/"-feat" set bits in the global plus_features/minus_features
 * arrays (applied later, at realize time); "feat=val" and bare "feat"
 * are translated to QOM property sets on the CPU object.
 *
 * NOTE: strtok() modifies 'features' in place and keeps static state,
 * so this parser is not reentrant.
 */
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
                                     Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single 'key=value" string being parsed */
    Error *local_err = NULL;

    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* legacy "+feat": record in global plus_features bitmap */
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
        } else if (featurestr[0] == '-') {
            /* legacy "-feat": record in global minus_features bitmap */
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
        } else if ((val = strchr(featurestr, '='))) {
            /* split "key=val" at the '=' */
            *val = 0; val++;
            feat2prop(featurestr);
            if (!strcmp(featurestr, "tsc-freq")) {
                int64_t tsc_freq;
                char *err;
                char num[32];

                /* accept size-style suffixes; scale unit is 1000 (Hz) */
                tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                               QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
                object_property_parse(OBJECT(cpu), num, "tsc-frequency",
                                      &local_err);
            } else {
                object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
            }
        } else {
            /* bare "feat" is shorthand for "feat=on" */
            feat2prop(featurestr);
            object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}
1999
2000 /* Print all cpuid feature names in featureset
2001 */
2002 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2003 {
2004 int bit;
2005 bool first = true;
2006
2007 for (bit = 0; bit < 32; bit++) {
2008 if (featureset[bit]) {
2009 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2010 first = false;
2011 }
2012 }
2013 }
2014
2015 /* generate CPU information. */
2016 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2017 {
2018 X86CPUDefinition *def;
2019 char buf[256];
2020 int i;
2021
2022 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2023 def = &builtin_x86_defs[i];
2024 snprintf(buf, sizeof(buf), "%s", def->name);
2025 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2026 }
2027 #ifdef CONFIG_KVM
2028 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2029 "KVM processor with all supported host features "
2030 "(only available in KVM mode)");
2031 #endif
2032
2033 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2034 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2035 FeatureWordInfo *fw = &feature_word_info[i];
2036
2037 (*cpu_fprintf)(f, " ");
2038 listflags(f, cpu_fprintf, fw->feat_names);
2039 (*cpu_fprintf)(f, "\n");
2040 }
2041 }
2042
2043 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2044 {
2045 CpuDefinitionInfoList *cpu_list = NULL;
2046 X86CPUDefinition *def;
2047 int i;
2048
2049 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2050 CpuDefinitionInfoList *entry;
2051 CpuDefinitionInfo *info;
2052
2053 def = &builtin_x86_defs[i];
2054 info = g_malloc0(sizeof(*info));
2055 info->name = g_strdup(def->name);
2056
2057 entry = g_malloc0(sizeof(*entry));
2058 entry->value = info;
2059 entry->next = cpu_list;
2060 cpu_list = entry;
2061 }
2062
2063 return cpu_list;
2064 }
2065
2066 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2067 bool migratable_only)
2068 {
2069 FeatureWordInfo *wi = &feature_word_info[w];
2070 uint32_t r;
2071
2072 if (kvm_enabled()) {
2073 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2074 wi->cpuid_ecx,
2075 wi->cpuid_reg);
2076 } else if (tcg_enabled()) {
2077 r = wi->tcg_features;
2078 } else {
2079 return ~0;
2080 }
2081 if (migratable_only) {
2082 r &= x86_cpu_get_migratable_flags(w);
2083 }
2084 return r;
2085 }
2086
2087 /*
2088 * Filters CPU feature words based on host availability of each feature.
2089 *
2090 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2091 */
2092 static int x86_cpu_filter_features(X86CPU *cpu)
2093 {
2094 CPUX86State *env = &cpu->env;
2095 FeatureWord w;
2096 int rv = 0;
2097
2098 for (w = 0; w < FEATURE_WORDS; w++) {
2099 uint32_t host_feat =
2100 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2101 uint32_t requested_features = env->features[w];
2102 env->features[w] &= host_feat;
2103 cpu->filtered_features[w] = requested_features & ~env->features[w];
2104 if (cpu->filtered_features[w]) {
2105 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2106 report_unavailable_features(w, cpu->filtered_features[w]);
2107 }
2108 rv = 1;
2109 }
2110 }
2111
2112 return rv;
2113 }
2114
2115 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2116 {
2117 PropValue *pv;
2118 for (pv = props; pv->prop; pv++) {
2119 if (!pv->value) {
2120 continue;
2121 }
2122 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2123 &error_abort);
2124 }
2125 }
2126
/* Load data from X86CPUDefinition
 *
 * Copies the model definition (CPUID levels, version fields, model-id,
 * feature words) into the CPU object via its QOM properties, then applies
 * the special cases that are not encoded in the struct: KVM default
 * properties, the hypervisor bit, and the (possibly host-derived) vendor.
 * Errors from the individual property sets accumulate in *errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* userspace irqchip cannot provide x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2176
/* Create (but do not realize) an X86CPU from a "model[,features]" string.
 *
 * Looks up the CPU class for the model name, instantiates it, and applies
 * the feature string. On any failure the error is propagated to *errp, a
 * partially-created CPU is unref'd, and NULL is returned; on success the
 * caller owns the returned object.
 */
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* split into at most two pieces: model name and the feature string */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        /* drop the half-constructed CPU on any failure */
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
2224
2225 X86CPU *cpu_x86_init(const char *cpu_model)
2226 {
2227 Error *error = NULL;
2228 X86CPU *cpu;
2229
2230 cpu = cpu_x86_create(cpu_model, &error);
2231 if (error) {
2232 goto out;
2233 }
2234
2235 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2236
2237 out:
2238 if (error) {
2239 error_report_err(error);
2240 if (cpu != NULL) {
2241 object_unref(OBJECT(cpu));
2242 cpu = NULL;
2243 }
2244 }
2245 return cpu;
2246 }
2247
/* Class init for model-specific CPU subclasses: stash the X86CPUDefinition
 * (passed as class_data by x86_register_cpudef_type) in the class. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2255
2256 static void x86_register_cpudef_type(X86CPUDefinition *def)
2257 {
2258 char *typename = x86_cpu_type_name(def->name);
2259 TypeInfo ti = {
2260 .name = typename,
2261 .parent = TYPE_X86_CPU,
2262 .class_init = x86_cpu_cpudef_class_init,
2263 .class_data = def,
2264 };
2265
2266 type_register(&ti);
2267 g_free(typename);
2268 }
2269
2270 #if !defined(CONFIG_USER_ONLY)
2271
/* Remove the APIC bit from CPUID[1].EDX (used by boards without an APIC). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2276
2277 #endif /* !CONFIG_USER_ONLY */
2278
/* Compute the guest-visible CPUID result for leaf 'index' / subleaf 'count'.
 *
 * The four output registers are written unconditionally. Out-of-range leaf
 * numbers are first clamped according to cpuid_level / cpuid_xlevel /
 * cpuid_xlevel2, then dispatched leaf by leaf.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string + highest standard leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* version, APIC id, feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so compute it dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so compute it dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            /* SMT level: bits of APIC id per core, threads per core */
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            /* Core level: bits of APIC id per package, threads per pkg */
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        /* ena_mask: XSAVE components the accelerator allows (all for TCG) */
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    /* grow the reported save-area size to cover this state */
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* extended vendor string + highest extended leaf */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2670
/* CPUClass::reset()
 *
 * Bring the CPU to its architectural power-on/reset state: clear env up to
 * (but not including) cpuid_level, flush the TLB, and re-initialize control
 * registers, segment caches, FPU/SSE state, MSRs, debug registers and MTRRs.
 * In system emulation it also designates the BSP and, under KVM, resets the
 * in-kernel vCPU state.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* fields from cpuid_level onwards are configuration, not state */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the top of the 4G space so the first fetch hits the
     * reset vector at 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* application processors start halted, waiting for SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2797
2798 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, as indicated by the
 * BSP bit of its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2803
/* TODO: remove me, when reset over QOM tree is implemented */
/* qemu_register_reset() callback: forward a machine reset to cpu_reset(). */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2810 #endif
2811
2812 static void mce_init(X86CPU *cpu)
2813 {
2814 CPUX86State *cenv = &cpu->env;
2815 unsigned int bank;
2816
2817 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2818 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2819 (CPUID_MCE | CPUID_MCA)) {
2820 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2821 cenv->mcg_ctl = ~(uint64_t)0;
2822 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2823 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2824 }
2825 }
2826 }
2827
2828 #ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the local APIC device for this CPU, picking
 * the implementation that matches the accelerator: in-kernel KVM APIC,
 * Xen APIC, or the emulated "apic". The device is attached as a QOM child
 * of the CPU.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2850
/* Realize the CPU's APIC device (if one was created) and, on the first
 * call only, map the APIC MMIO region into system memory. All CPUs share
 * that single mapping, hence the function-local once flag.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2873
/* machine-init-done notifier: if the machine exposes /machine/smram, alias
 * it into this CPU's address-space root (disabled by default, at higher
 * priority than regular memory) so SMM code can toggle it later.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2888 #else
/* User-mode build: there is no APIC device, so realizing it is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2892 #endif
2893
2894
/* True when the configured CPUID vendor strings identify an Intel CPU. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True when the configured CPUID vendor strings identify an AMD CPU. */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU.
 *
 * Finalizes the feature words (host features, legacy +/- overrides,
 * accelerator filtering, AMD aliases), then performs one-time setup:
 * exec/TCG init, APIC creation, MCE init, the per-CPU address space
 * (TCG system emulation only), vCPU start, APIC realize and reset.
 * The ordering of these steps matters; see the inline comments.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* apply legacy "+feat"/"-feat" overrides last so they win */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }

    /* leaf 7 features require cpuid_level >= 7 to be visible */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* an APIC is needed when the feature bit is set or with SMP */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3032
/* Opaque state of a bit-level QOM property: the uint32_t word holding the
 * bit(s) and the mask of bits the property controls. */
typedef struct BitProperty {
    uint32_t *ptr;   /* word containing the controlled bit(s) */
    uint32_t mask;   /* bits of *ptr this property gets/sets */
} BitProperty;
3037
3038 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3039 void *opaque, Error **errp)
3040 {
3041 BitProperty *fp = opaque;
3042 bool value = (*fp->ptr & fp->mask) == fp->mask;
3043 visit_type_bool(v, name, &value, errp);
3044 }
3045
3046 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3047 void *opaque, Error **errp)
3048 {
3049 DeviceState *dev = DEVICE(obj);
3050 BitProperty *fp = opaque;
3051 Error *local_err = NULL;
3052 bool value;
3053
3054 if (dev->realized) {
3055 qdev_prop_set_after_realize(dev, name, errp);
3056 return;
3057 }
3058
3059 visit_type_bool(v, name, &value, &local_err);
3060 if (local_err) {
3061 error_propagate(errp, local_err);
3062 return;
3063 }
3064
3065 if (value) {
3066 *fp->ptr |= fp->mask;
3067 } else {
3068 *fp->ptr &= ~fp->mask;
3069 }
3070 }
3071
3072 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3073 void *opaque)
3074 {
3075 BitProperty *prop = opaque;
3076 g_free(prop);
3077 }
3078
3079 /* Register a boolean property to get/set a single bit in a uint32_t field.
3080 *
3081 * The same property name can be registered multiple times to make it affect
3082 * multiple bits in the same FeatureWord. In that case, the getter will return
3083 * true only if all bits are set.
3084 */
3085 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3086 const char *prop_name,
3087 uint32_t *field,
3088 int bitnr)
3089 {
3090 BitProperty *fp;
3091 ObjectProperty *op;
3092 uint32_t mask = (1UL << bitnr);
3093
3094 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3095 if (op) {
3096 fp = op->opaque;
3097 assert(fp->ptr == field);
3098 fp->mask |= mask;
3099 } else {
3100 fp = g_new0(BitProperty, 1);
3101 fp->ptr = field;
3102 fp->mask = mask;
3103 object_property_add(OBJECT(cpu), prop_name, "bool",
3104 x86_cpu_get_bit_prop,
3105 x86_cpu_set_bit_prop,
3106 x86_cpu_release_bit_prop, fp, &error_abort);
3107 }
3108 }
3109
3110 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3111 FeatureWord w,
3112 int bitnr)
3113 {
3114 Object *obj = OBJECT(cpu);
3115 int i;
3116 char **names;
3117 FeatureWordInfo *fi = &feature_word_info[w];
3118
3119 if (!fi->feat_names) {
3120 return;
3121 }
3122 if (!fi->feat_names[bitnr]) {
3123 return;
3124 }
3125
3126 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3127
3128 feat2prop(names[0]);
3129 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3130
3131 for (i = 1; names[i]; i++) {
3132 feat2prop(names[i]);
3133 object_property_add_alias(obj, names[i], obj, names[0],
3134 &error_abort);
3135 }
3136
3137 g_strfreev(names);
3138 }
3139
/* QOM instance_init for X86CPU: wires up the env pointer, registers the
 * CPU's QOM properties (CPUID fields, feature words and per-bit feature
 * properties), then loads the class's CPU model definition.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version/identification properties. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only views of the requested and KVM-filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* Register a boolean property for every named bit of every feature
     * word (see x86_cpu_register_feature_bit_props()).
     */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Apply the class's CPU model definition; failure here is fatal. */
    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3195
3196 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3197 {
3198 X86CPU *cpu = X86_CPU(cs);
3199
3200 return cpu->apic_id;
3201 }
3202
3203 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3204 {
3205 X86CPU *cpu = X86_CPU(cs);
3206
3207 return cpu->env.cr[0] & CR0_PG_MASK;
3208 }
3209
3210 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3211 {
3212 X86CPU *cpu = X86_CPU(cs);
3213
3214 cpu->env.eip = value;
3215 }
3216
3217 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3218 {
3219 X86CPU *cpu = X86_CPU(cs);
3220
3221 cpu->env.eip = tb->pc - tb->cs_base;
3222 }
3223
3224 static bool x86_cpu_has_work(CPUState *cs)
3225 {
3226 X86CPU *cpu = X86_CPU(cs);
3227 CPUX86State *env = &cpu->env;
3228
3229 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3230 CPU_INTERRUPT_POLL)) &&
3231 (env->eflags & IF_MASK)) ||
3232 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3233 CPU_INTERRUPT_INIT |
3234 CPU_INTERRUPT_SIPI |
3235 CPU_INTERRUPT_MCE)) ||
3236 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3237 !(env->hflags & HF_SMM_MASK));
3238 }
3239
3240 static Property x86_cpu_properties[] = {
3241 DEFINE_PROP_BOOL("pmu",