1 /*
2 * i386 virtual CPU header
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #ifndef I386_CPU_H
21 #define I386_CPU_H
22
23 #include "sysemu/tcg.h"
24 #include "cpu-qom.h"
25 #include "hyperv-proto.h"
26 #include "exec/cpu-defs.h"
27 #include "qapi/qapi-types-common.h"
28
29 /* The x86 has a strong memory model with some store-after-load re-ordering */
30 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
31
32 #define KVM_HAVE_MCE_INJECTION 1
33
34 /* Maximum instruction code size */
35 #define TARGET_MAX_INSN_SIZE 16
36
37 /* Support for self-modifying code even if the modified instruction is
38 close to the modifying instruction */
39 #define TARGET_HAS_PRECISE_SMC
40
41 #ifdef TARGET_X86_64
42 #define I386_ELF_MACHINE EM_X86_64
43 #define ELF_MACHINE_UNAME "x86_64"
44 #else
45 #define I386_ELF_MACHINE EM_386
46 #define ELF_MACHINE_UNAME "i686"
47 #endif
48
49 enum {
50 R_EAX = 0,
51 R_ECX = 1,
52 R_EDX = 2,
53 R_EBX = 3,
54 R_ESP = 4,
55 R_EBP = 5,
56 R_ESI = 6,
57 R_EDI = 7,
58 R_R8 = 8,
59 R_R9 = 9,
60 R_R10 = 10,
61 R_R11 = 11,
62 R_R12 = 12,
63 R_R13 = 13,
64 R_R14 = 14,
65 R_R15 = 15,
66
67 R_AL = 0,
68 R_CL = 1,
69 R_DL = 2,
70 R_BL = 3,
71 R_AH = 4,
72 R_CH = 5,
73 R_DH = 6,
74 R_BH = 7,
75 };
76
77 typedef enum X86Seg {
78 R_ES = 0,
79 R_CS = 1,
80 R_SS = 2,
81 R_DS = 3,
82 R_FS = 4,
83 R_GS = 5,
84 R_LDTR = 6,
85 R_TR = 7,
86 } X86Seg;
87
88 /* segment descriptor fields */
89 #define DESC_G_SHIFT 23
90 #define DESC_G_MASK (1 << DESC_G_SHIFT)
91 #define DESC_B_SHIFT 22
92 #define DESC_B_MASK (1 << DESC_B_SHIFT)
93 #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
94 #define DESC_L_MASK (1 << DESC_L_SHIFT)
95 #define DESC_AVL_SHIFT 20
96 #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
97 #define DESC_P_SHIFT 15
98 #define DESC_P_MASK (1 << DESC_P_SHIFT)
99 #define DESC_DPL_SHIFT 13
100 #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
101 #define DESC_S_SHIFT 12
102 #define DESC_S_MASK (1 << DESC_S_SHIFT)
103 #define DESC_TYPE_SHIFT 8
104 #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
105 #define DESC_A_MASK (1 << 8)
106
107 #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
108 #define DESC_C_MASK (1 << 10) /* code: conforming */
109 #define DESC_R_MASK (1 << 9) /* code: readable */
110
111 #define DESC_E_MASK (1 << 10) /* data: expansion direction */
112 #define DESC_W_MASK (1 << 9) /* data: writable */
113
114 #define DESC_TSS_BUSY_MASK (1 << 9)
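
/*
 * Illustrative sketch, not a QEMU helper (the name is hypothetical):
 * fields of a cached descriptor flags word are decoded with the masks
 * above, e.g. the descriptor privilege level:
 */
static inline unsigned int desc_dpl_example(uint32_t flags)
{
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT; /* DPL, 0..3 */
}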
115
116 /* eflags masks */
117 #define CC_C 0x0001
118 #define CC_P 0x0004
119 #define CC_A 0x0010
120 #define CC_Z 0x0040
121 #define CC_S 0x0080
122 #define CC_O 0x0800
123
124 #define TF_SHIFT 8
125 #define IOPL_SHIFT 12
126 #define VM_SHIFT 17
127
128 #define TF_MASK 0x00000100
129 #define IF_MASK 0x00000200
130 #define DF_MASK 0x00000400
131 #define IOPL_MASK 0x00003000
132 #define NT_MASK 0x00004000
133 #define RF_MASK 0x00010000
134 #define VM_MASK 0x00020000
135 #define AC_MASK 0x00040000
136 #define VIF_MASK 0x00080000
137 #define VIP_MASK 0x00100000
138 #define ID_MASK 0x00200000
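
/*
 * Worked example: IOPL is a two-bit field, so it is extracted as
 *
 *     unsigned int iopl = (eflags & IOPL_MASK) >> IOPL_SHIFT;
 *
 * yielding a privilege level of 0..3.
 */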
139
140 /* hidden flags - used internally by QEMU to represent additional CPU
141 states. Only the INHIBIT_IRQ, SMM and GUEST flags are not redundant.
142 The TF, IOPL, VM and AC bits are kept at their eflags positions (and
143 not reused for other hidden flags) to ease ORing hflags with eflags. */
144 /* current cpl */
145 #define HF_CPL_SHIFT 0
146 /* true if hardware interrupts must be disabled for next instruction */
147 #define HF_INHIBIT_IRQ_SHIFT 3
148 /* 16-bit or 32-bit segments */
149 #define HF_CS32_SHIFT 4
150 #define HF_SS32_SHIFT 5
151 /* zero base for DS, ES and SS; can be '0' only in a 32-bit CS segment */
152 #define HF_ADDSEG_SHIFT 6
153 /* copy of CR0.PE (protected mode) */
154 #define HF_PE_SHIFT 7
155 #define HF_TF_SHIFT 8 /* must be same as eflags */
156 #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
157 #define HF_EM_SHIFT 10
158 #define HF_TS_SHIFT 11
159 #define HF_IOPL_SHIFT 12 /* must be same as eflags */
160 #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
161 #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
162 #define HF_RF_SHIFT 16 /* must be same as eflags */
163 #define HF_VM_SHIFT 17 /* must be same as eflags */
164 #define HF_AC_SHIFT 18 /* must be same as eflags */
165 #define HF_SMM_SHIFT 19 /* CPU in SMM mode */
166 #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
167 #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */
168 #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
169 #define HF_SMAP_SHIFT 23 /* CR4.SMAP */
170 #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
171 #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
172 #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
173
174 #define HF_CPL_MASK (3 << HF_CPL_SHIFT)
175 #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
176 #define HF_CS32_MASK (1 << HF_CS32_SHIFT)
177 #define HF_SS32_MASK (1 << HF_SS32_SHIFT)
178 #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
179 #define HF_PE_MASK (1 << HF_PE_SHIFT)
180 #define HF_TF_MASK (1 << HF_TF_SHIFT)
181 #define HF_MP_MASK (1 << HF_MP_SHIFT)
182 #define HF_EM_MASK (1 << HF_EM_SHIFT)
183 #define HF_TS_MASK (1 << HF_TS_SHIFT)
184 #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
185 #define HF_LMA_MASK (1 << HF_LMA_SHIFT)
186 #define HF_CS64_MASK (1 << HF_CS64_SHIFT)
187 #define HF_RF_MASK (1 << HF_RF_SHIFT)
188 #define HF_VM_MASK (1 << HF_VM_SHIFT)
189 #define HF_AC_MASK (1 << HF_AC_SHIFT)
190 #define HF_SMM_MASK (1 << HF_SMM_SHIFT)
191 #define HF_SVME_MASK (1 << HF_SVME_SHIFT)
192 #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT)
193 #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
194 #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
195 #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
196 #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
197 #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
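
/*
 * Illustrative sketch, not a QEMU helper: because TF/IOPL/VM/AC keep
 * their eflags bit positions, hflags and the relevant eflags bits can
 * be combined with a plain OR when computing TB flags, roughly:
 *
 *     flags = env->hflags |
 *         (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK | AC_MASK));
 */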
198
199 /* hflags2 */
200
201 #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
202 #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
203 #define HF2_NMI_SHIFT 2 /* CPU serving NMI */
204 #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
205 #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
206 #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
207 #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
208 #define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */
209
210 #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
211 #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
212 #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
213 #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
214 #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
215 #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
216 #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
217 #define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT)
218
219 #define CR0_PE_SHIFT 0
220 #define CR0_MP_SHIFT 1
221
222 #define CR0_PE_MASK (1U << 0)
223 #define CR0_MP_MASK (1U << 1)
224 #define CR0_EM_MASK (1U << 2)
225 #define CR0_TS_MASK (1U << 3)
226 #define CR0_ET_MASK (1U << 4)
227 #define CR0_NE_MASK (1U << 5)
228 #define CR0_WP_MASK (1U << 16)
229 #define CR0_AM_MASK (1U << 18)
230 #define CR0_PG_MASK (1U << 31)
231
232 #define CR4_VME_MASK (1U << 0)
233 #define CR4_PVI_MASK (1U << 1)
234 #define CR4_TSD_MASK (1U << 2)
235 #define CR4_DE_MASK (1U << 3)
236 #define CR4_PSE_MASK (1U << 4)
237 #define CR4_PAE_MASK (1U << 5)
238 #define CR4_MCE_MASK (1U << 6)
239 #define CR4_PGE_MASK (1U << 7)
240 #define CR4_PCE_MASK (1U << 8)
241 #define CR4_OSFXSR_SHIFT 9
242 #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
243 #define CR4_OSXMMEXCPT_MASK (1U << 10)
244 #define CR4_LA57_MASK (1U << 12)
245 #define CR4_VMXE_MASK (1U << 13)
246 #define CR4_SMXE_MASK (1U << 14)
247 #define CR4_FSGSBASE_MASK (1U << 16)
248 #define CR4_PCIDE_MASK (1U << 17)
249 #define CR4_OSXSAVE_MASK (1U << 18)
250 #define CR4_SMEP_MASK (1U << 20)
251 #define CR4_SMAP_MASK (1U << 21)
252 #define CR4_PKE_MASK (1U << 22)
253
254 #define DR6_BD (1 << 13)
255 #define DR6_BS (1 << 14)
256 #define DR6_BT (1 << 15)
257 #define DR6_FIXED_1 0xffff0ff0
258
259 #define DR7_GD (1 << 13)
260 #define DR7_TYPE_SHIFT 16
261 #define DR7_LEN_SHIFT 18
262 #define DR7_FIXED_1 0x00000400
263 #define DR7_GLOBAL_BP_MASK 0xaa
264 #define DR7_LOCAL_BP_MASK 0x55
265 #define DR7_MAX_BP 4
266 #define DR7_TYPE_BP_INST 0x0
267 #define DR7_TYPE_DATA_WR 0x1
268 #define DR7_TYPE_IO_RW 0x2
269 #define DR7_TYPE_DATA_RW 0x3
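
/*
 * Illustrative sketch, not a QEMU helper (the name is hypothetical):
 * each of the four breakpoints owns a 2-bit type and a 2-bit length
 * field, stepped in 4-bit strides from the shifts above.
 */
static inline int dr7_bp_type_example(uint32_t dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + index * 4)) & 3;
}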
270
271 #define PG_PRESENT_BIT 0
272 #define PG_RW_BIT 1
273 #define PG_USER_BIT 2
274 #define PG_PWT_BIT 3
275 #define PG_PCD_BIT 4
276 #define PG_ACCESSED_BIT 5
277 #define PG_DIRTY_BIT 6
278 #define PG_PSE_BIT 7
279 #define PG_GLOBAL_BIT 8
280 #define PG_PSE_PAT_BIT 12
281 #define PG_PKRU_BIT 59
282 #define PG_NX_BIT 63
283
284 #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
285 #define PG_RW_MASK (1 << PG_RW_BIT)
286 #define PG_USER_MASK (1 << PG_USER_BIT)
287 #define PG_PWT_MASK (1 << PG_PWT_BIT)
288 #define PG_PCD_MASK (1 << PG_PCD_BIT)
289 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
290 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
291 #define PG_PSE_MASK (1 << PG_PSE_BIT)
292 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
293 #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
294 #define PG_ADDRESS_MASK 0x000ffffffffff000LL
295 #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
296 #define PG_HI_USER_MASK 0x7ff0000000000000LL
297 #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
298 #define PG_NX_MASK (1ULL << PG_NX_BIT)
299
300 #define PG_ERROR_W_BIT 1
301
302 #define PG_ERROR_P_MASK 0x01
303 #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
304 #define PG_ERROR_U_MASK 0x04
305 #define PG_ERROR_RSVD_MASK 0x08
306 #define PG_ERROR_I_D_MASK 0x10
307 #define PG_ERROR_PK_MASK 0x20
308
309 #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
310 #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
311 #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */
312
313 #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
314 #define MCE_BANKS_DEF 10
315
316 #define MCG_CAP_BANKS_MASK 0xff
317
318 #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
319 #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
320 #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
321 #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */
322
323 #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */
324
325 #define MCI_STATUS_VAL (1ULL<<63) /* valid error */
326 #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
327 #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
328 #define MCI_STATUS_EN (1ULL<<60) /* error enabled */
329 #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
330 #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
331 #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
332 #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
333 #define MCI_STATUS_AR (1ULL<<55) /* Action required */
334
335 /* MISC register defines */
336 #define MCM_ADDR_SEGOFF 0 /* segment offset */
337 #define MCM_ADDR_LINEAR 1 /* linear address */
338 #define MCM_ADDR_PHYS 2 /* physical address */
339 #define MCM_ADDR_MEM 3 /* memory address */
340 #define MCM_ADDR_GENERIC 7 /* generic */
341
342 #define MSR_IA32_TSC 0x10
343 #define MSR_IA32_APICBASE 0x1b
344 #define MSR_IA32_APICBASE_BSP (1<<8)
345 #define MSR_IA32_APICBASE_ENABLE (1<<11)
346 #define MSR_IA32_APICBASE_EXTD (1 << 10)
347 #define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
348 #define MSR_IA32_FEATURE_CONTROL 0x0000003a
349 #define MSR_TSC_ADJUST 0x0000003b
350 #define MSR_IA32_SPEC_CTRL 0x48
351 #define MSR_VIRT_SSBD 0xc001011f
352 #define MSR_IA32_PRED_CMD 0x49
353 #define MSR_IA32_UCODE_REV 0x8b
354 #define MSR_IA32_CORE_CAPABILITY 0xcf
355
356 #define MSR_IA32_ARCH_CAPABILITIES 0x10a
357 #define ARCH_CAP_TSX_CTRL_MSR (1<<7)
358
359 #define MSR_IA32_PERF_CAPABILITIES 0x345
360
361 #define MSR_IA32_TSX_CTRL 0x122
362 #define MSR_IA32_TSCDEADLINE 0x6e0
363
364 #define FEATURE_CONTROL_LOCKED (1<<0)
365 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
366 #define FEATURE_CONTROL_LMCE (1<<20)
367
368 #define MSR_P6_PERFCTR0 0xc1
369
370 #define MSR_IA32_SMBASE 0x9e
371 #define MSR_SMI_COUNT 0x34
372 #define MSR_MTRRcap 0xfe
373 #define MSR_MTRRcap_VCNT 8
374 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
375 #define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
376
377 #define MSR_IA32_SYSENTER_CS 0x174
378 #define MSR_IA32_SYSENTER_ESP 0x175
379 #define MSR_IA32_SYSENTER_EIP 0x176
380
381 #define MSR_MCG_CAP 0x179
382 #define MSR_MCG_STATUS 0x17a
383 #define MSR_MCG_CTL 0x17b
384 #define MSR_MCG_EXT_CTL 0x4d0
385
386 #define MSR_P6_EVNTSEL0 0x186
387
388 #define MSR_IA32_PERF_STATUS 0x198
389
390 #define MSR_IA32_MISC_ENABLE 0x1a0
391 /* Indicates good rep/movs microcode on some processors: */
392 #define MSR_IA32_MISC_ENABLE_DEFAULT 1
393 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
394
395 #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
396 #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
397
398 #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
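
/*
 * Worked example (values follow from the macros above):
 * MSR_MTRRphysBase(1) == 0x202 and MSR_MTRRphysMask(1) == 0x203, while
 * MSR_MTRRphysIndex(0x203) == 1 recovers the register index from
 * either MSR address of the base/mask pair.
 */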
399
400 #define MSR_MTRRfix64K_00000 0x250
401 #define MSR_MTRRfix16K_80000 0x258
402 #define MSR_MTRRfix16K_A0000 0x259
403 #define MSR_MTRRfix4K_C0000 0x268
404 #define MSR_MTRRfix4K_C8000 0x269
405 #define MSR_MTRRfix4K_D0000 0x26a
406 #define MSR_MTRRfix4K_D8000 0x26b
407 #define MSR_MTRRfix4K_E0000 0x26c
408 #define MSR_MTRRfix4K_E8000 0x26d
409 #define MSR_MTRRfix4K_F0000 0x26e
410 #define MSR_MTRRfix4K_F8000 0x26f
411
412 #define MSR_PAT 0x277
413
414 #define MSR_MTRRdefType 0x2ff
415
416 #define MSR_CORE_PERF_FIXED_CTR0 0x309
417 #define MSR_CORE_PERF_FIXED_CTR1 0x30a
418 #define MSR_CORE_PERF_FIXED_CTR2 0x30b
419 #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
420 #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
421 #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
422 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
423
424 #define MSR_MC0_CTL 0x400
425 #define MSR_MC0_STATUS 0x401
426 #define MSR_MC0_ADDR 0x402
427 #define MSR_MC0_MISC 0x403
428
429 #define MSR_IA32_RTIT_OUTPUT_BASE 0x560
430 #define MSR_IA32_RTIT_OUTPUT_MASK 0x561
431 #define MSR_IA32_RTIT_CTL 0x570
432 #define MSR_IA32_RTIT_STATUS 0x571
433 #define MSR_IA32_RTIT_CR3_MATCH 0x572
434 #define MSR_IA32_RTIT_ADDR0_A 0x580
435 #define MSR_IA32_RTIT_ADDR0_B 0x581
436 #define MSR_IA32_RTIT_ADDR1_A 0x582
437 #define MSR_IA32_RTIT_ADDR1_B 0x583
438 #define MSR_IA32_RTIT_ADDR2_A 0x584
439 #define MSR_IA32_RTIT_ADDR2_B 0x585
440 #define MSR_IA32_RTIT_ADDR3_A 0x586
441 #define MSR_IA32_RTIT_ADDR3_B 0x587
442 #define MAX_RTIT_ADDRS 8
443
444 #define MSR_EFER 0xc0000080
445
446 #define MSR_EFER_SCE (1 << 0)
447 #define MSR_EFER_LME (1 << 8)
448 #define MSR_EFER_LMA (1 << 10)
449 #define MSR_EFER_NXE (1 << 11)
450 #define MSR_EFER_SVME (1 << 12)
451 #define MSR_EFER_FFXSR (1 << 14)
452
453 #define MSR_STAR 0xc0000081
454 #define MSR_LSTAR 0xc0000082
455 #define MSR_CSTAR 0xc0000083
456 #define MSR_FMASK 0xc0000084
457 #define MSR_FSBASE 0xc0000100
458 #define MSR_GSBASE 0xc0000101
459 #define MSR_KERNELGSBASE 0xc0000102
460 #define MSR_TSC_AUX 0xc0000103
461
462 #define MSR_VM_HSAVE_PA 0xc0010117
463
464 #define MSR_IA32_BNDCFGS 0x00000d90
465 #define MSR_IA32_XSS 0x00000da0
466 #define MSR_IA32_UMWAIT_CONTROL 0xe1
467
468 #define MSR_IA32_VMX_BASIC 0x00000480
469 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
470 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
471 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483
472 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
473 #define MSR_IA32_VMX_MISC 0x00000485
474 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486
475 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487
476 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488
477 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489
478 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
479 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
480 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
481 #define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
482 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
483 #define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
484 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
485 #define MSR_IA32_VMX_VMFUNC 0x00000491
486
487 #define XSTATE_FP_BIT 0
488 #define XSTATE_SSE_BIT 1
489 #define XSTATE_YMM_BIT 2
490 #define XSTATE_BNDREGS_BIT 3
491 #define XSTATE_BNDCSR_BIT 4
492 #define XSTATE_OPMASK_BIT 5
493 #define XSTATE_ZMM_Hi256_BIT 6
494 #define XSTATE_Hi16_ZMM_BIT 7
495 #define XSTATE_PKRU_BIT 9
496
497 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
498 #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
499 #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
500 #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
501 #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
502 #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
503 #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
504 #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
505 #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
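
/*
 * Illustrative sketch, not a QEMU helper (the name is hypothetical):
 * AVX state can only be enabled in XCR0 together with x87 and SSE
 * state, so a usability check tests all three mask bits at once.
 */
static inline int xcr0_avx_usable_example(uint64_t xcr0)
{
    uint64_t needed = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK;
    return (xcr0 & needed) == needed;
}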
506
507 /* CPUID feature words */
508 typedef enum FeatureWord {
509 FEAT_1_EDX, /* CPUID[1].EDX */
510 FEAT_1_ECX, /* CPUID[1].ECX */
511 FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
512 FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
513 FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
514 FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
515 FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
516 FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
517 FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
518 FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
519 FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
520 FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
521 FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
522 FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */
523 FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */
524 FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */
525 FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */
526 FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */
527 FEAT_SVM, /* CPUID[8000_000A].EDX */
528 FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
529 FEAT_6_EAX, /* CPUID[6].EAX */
530 FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
531 FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
532 FEAT_ARCH_CAPABILITIES,
533 FEAT_CORE_CAPABILITY,
534 FEAT_PERF_CAPABILITIES,
535 FEAT_VMX_PROCBASED_CTLS,
536 FEAT_VMX_SECONDARY_CTLS,
537 FEAT_VMX_PINBASED_CTLS,
538 FEAT_VMX_EXIT_CTLS,
539 FEAT_VMX_ENTRY_CTLS,
540 FEAT_VMX_MISC,
541 FEAT_VMX_EPT_VPID_CAPS,
542 FEAT_VMX_BASIC,
543 FEAT_VMX_VMFUNC,
544 FEATURE_WORDS,
545 } FeatureWord;
546
547 typedef uint64_t FeatureWordArray[FEATURE_WORDS];
548
549 /* cpuid_features bits */
550 #define CPUID_FP87 (1U << 0)
551 #define CPUID_VME (1U << 1)
552 #define CPUID_DE (1U << 2)
553 #define CPUID_PSE (1U << 3)
554 #define CPUID_TSC (1U << 4)
555 #define CPUID_MSR (1U << 5)
556 #define CPUID_PAE (1U << 6)
557 #define CPUID_MCE (1U << 7)
558 #define CPUID_CX8 (1U << 8)
559 #define CPUID_APIC (1U << 9)
560 #define CPUID_SEP (1U << 11) /* sysenter/sysexit */
561 #define CPUID_MTRR (1U << 12)
562 #define CPUID_PGE (1U << 13)
563 #define CPUID_MCA (1U << 14)
564 #define CPUID_CMOV (1U << 15)
565 #define CPUID_PAT (1U << 16)
566 #define CPUID_PSE36 (1U << 17)
567 #define CPUID_PN (1U << 18)
568 #define CPUID_CLFLUSH (1U << 19)
569 #define CPUID_DTS (1U << 21)
570 #define CPUID_ACPI (1U << 22)
571 #define CPUID_MMX (1U << 23)
572 #define CPUID_FXSR (1U << 24)
573 #define CPUID_SSE (1U << 25)
574 #define CPUID_SSE2 (1U << 26)
575 #define CPUID_SS (1U << 27)
576 #define CPUID_HT (1U << 28)
577 #define CPUID_TM (1U << 29)
578 #define CPUID_IA64 (1U << 30)
579 #define CPUID_PBE (1U << 31)
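
/*
 * Illustrative usage, not a QEMU helper (the name is hypothetical):
 * feature words are indexed by FeatureWord, so testing a CPUID bit is
 * an array lookup plus a mask, e.g. for SSE2 in CPUID[1].EDX:
 */
static inline int cpu_has_sse2_example(const FeatureWordArray features)
{
    return (features[FEAT_1_EDX] & CPUID_SSE2) != 0;
}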
580
581 #define CPUID_EXT_SSE3 (1U << 0)
582 #define CPUID_EXT_PCLMULQDQ (1U << 1)
583 #define CPUID_EXT_DTES64 (1U << 2)
584 #define CPUID_EXT_MONITOR (1U << 3)
585 #define CPUID_EXT_DSCPL (1U << 4)
586 #define CPUID_EXT_VMX (1U << 5)
587 #define CPUID_EXT_SMX (1U << 6)
588 #define CPUID_EXT_EST (1U << 7)
589 #define CPUID_EXT_TM2 (1U << 8)
590 #define CPUID_EXT_SSSE3 (1U << 9)
591 #define CPUID_EXT_CID (1U << 10)
592 #define CPUID_EXT_FMA (1U << 12)
593 #define CPUID_EXT_CX16 (1U << 13)
594 #define CPUID_EXT_XTPR (1U << 14)
595 #define CPUID_EXT_PDCM (1U << 15)
596 #define CPUID_EXT_PCID (1U << 17)
597 #define CPUID_EXT_DCA (1U << 18)
598 #define CPUID_EXT_SSE41 (1U << 19)
599 #define CPUID_EXT_SSE42 (1U << 20)
600 #define CPUID_EXT_X2APIC (1U << 21)
601 #define CPUID_EXT_MOVBE (1U << 22)
602 #define CPUID_EXT_POPCNT (1U << 23)
603 #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
604 #define CPUID_EXT_AES (1U << 25)
605 #define CPUID_EXT_XSAVE (1U << 26)
606 #define CPUID_EXT_OSXSAVE (1U << 27)
607 #define CPUID_EXT_AVX (1U << 28)
608 #define CPUID_EXT_F16C (1U << 29)
609 #define CPUID_EXT_RDRAND (1U << 30)
610 #define CPUID_EXT_HYPERVISOR (1U << 31)
611
612 #define CPUID_EXT2_FPU (1U << 0)
613 #define CPUID_EXT2_VME (1U << 1)
614 #define CPUID_EXT2_DE (1U << 2)
615 #define CPUID_EXT2_PSE (1U << 3)
616 #define CPUID_EXT2_TSC (1U << 4)
617 #define CPUID_EXT2_MSR (1U << 5)
618 #define CPUID_EXT2_PAE (1U << 6)
619 #define CPUID_EXT2_MCE (1U << 7)
620 #define CPUID_EXT2_CX8 (1U << 8)
621 #define CPUID_EXT2_APIC (1U << 9)
622 #define CPUID_EXT2_SYSCALL (1U << 11)
623 #define CPUID_EXT2_MTRR (1U << 12)
624 #define CPUID_EXT2_PGE (1U << 13)
625 #define CPUID_EXT2_MCA (1U << 14)
626 #define CPUID_EXT2_CMOV (1U << 15)
627 #define CPUID_EXT2_PAT (1U << 16)
628 #define CPUID_EXT2_PSE36 (1U << 17)
629 #define CPUID_EXT2_MP (1U << 19)
630 #define CPUID_EXT2_NX (1U << 20)
631 #define CPUID_EXT2_MMXEXT (1U << 22)
632 #define CPUID_EXT2_MMX (1U << 23)
633 #define CPUID_EXT2_FXSR (1U << 24)
634 #define CPUID_EXT2_FFXSR (1U << 25)
635 #define CPUID_EXT2_PDPE1GB (1U << 26)
636 #define CPUID_EXT2_RDTSCP (1U << 27)
637 #define CPUID_EXT2_LM (1U << 29)
638 #define CPUID_EXT2_3DNOWEXT (1U << 30)
639 #define CPUID_EXT2_3DNOW (1U << 31)
640
641 /* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
642 #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
643 CPUID_EXT2_DE | CPUID_EXT2_PSE | \
644 CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
645 CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
646 CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
647 CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
648 CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
649 CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
650 CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
651
652 #define CPUID_EXT3_LAHF_LM (1U << 0)
653 #define CPUID_EXT3_CMP_LEG (1U << 1)
654 #define CPUID_EXT3_SVM (1U << 2)
655 #define CPUID_EXT3_EXTAPIC (1U << 3)
656 #define CPUID_EXT3_CR8LEG (1U << 4)
657 #define CPUID_EXT3_ABM (1U << 5)
658 #define CPUID_EXT3_SSE4A (1U << 6)
659 #define CPUID_EXT3_MISALIGNSSE (1U << 7)
660 #define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
661 #define CPUID_EXT3_OSVW (1U << 9)
662 #define CPUID_EXT3_IBS (1U << 10)
663 #define CPUID_EXT3_XOP (1U << 11)
664 #define CPUID_EXT3_SKINIT (1U << 12)
665 #define CPUID_EXT3_WDT (1U << 13)
666 #define CPUID_EXT3_LWP (1U << 15)
667 #define CPUID_EXT3_FMA4 (1U << 16)
668 #define CPUID_EXT3_TCE (1U << 17)
669 #define CPUID_EXT3_NODEID (1U << 19)
670 #define CPUID_EXT3_TBM (1U << 21)
671 #define CPUID_EXT3_TOPOEXT (1U << 22)
672 #define CPUID_EXT3_PERFCORE (1U << 23)
673 #define CPUID_EXT3_PERFNB (1U << 24)
674
675 #define CPUID_SVM_NPT (1U << 0)
676 #define CPUID_SVM_LBRV (1U << 1)
677 #define CPUID_SVM_SVMLOCK (1U << 2)
678 #define CPUID_SVM_NRIPSAVE (1U << 3)
679 #define CPUID_SVM_TSCSCALE (1U << 4)
680 #define CPUID_SVM_VMCBCLEAN (1U << 5)
681 #define CPUID_SVM_FLUSHASID (1U << 6)
682 #define CPUID_SVM_DECODEASSIST (1U << 7)
683 #define CPUID_SVM_PAUSEFILTER (1U << 10)
684 #define CPUID_SVM_PFTHRESHOLD (1U << 12)
685
686 /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
687 #define CPUID_7_0_EBX_FSGSBASE (1U << 0)
688 /* 1st Group of Advanced Bit Manipulation Extensions */
689 #define CPUID_7_0_EBX_BMI1 (1U << 3)
690 /* Hardware Lock Elision */
691 #define CPUID_7_0_EBX_HLE (1U << 4)
692 /* Intel Advanced Vector Extensions 2 */
693 #define CPUID_7_0_EBX_AVX2 (1U << 5)
694 /* Supervisor-mode Execution Prevention */
695 #define CPUID_7_0_EBX_SMEP (1U << 7)
696 /* 2nd Group of Advanced Bit Manipulation Extensions */
697 #define CPUID_7_0_EBX_BMI2 (1U << 8)
698 /* Enhanced REP MOVSB/STOSB */
699 #define CPUID_7_0_EBX_ERMS (1U << 9)
700 /* Invalidate Process-Context Identifier */
701 #define CPUID_7_0_EBX_INVPCID (1U << 10)
702 /* Restricted Transactional Memory */
703 #define CPUID_7_0_EBX_RTM (1U << 11)
704 /* Memory Protection Extension */
705 #define CPUID_7_0_EBX_MPX (1U << 14)
706 /* AVX-512 Foundation */
707 #define CPUID_7_0_EBX_AVX512F (1U << 16)
708 /* AVX-512 Doubleword & Quadword Instruction */
709 #define CPUID_7_0_EBX_AVX512DQ (1U << 17)
710 /* Read Random SEED */
711 #define CPUID_7_0_EBX_RDSEED (1U << 18)
712 /* ADCX and ADOX instructions */
713 #define CPUID_7_0_EBX_ADX (1U << 19)
714 /* Supervisor Mode Access Prevention */
715 #define CPUID_7_0_EBX_SMAP (1U << 20)
716 /* AVX-512 Integer Fused Multiply Add */
717 #define CPUID_7_0_EBX_AVX512IFMA (1U << 21)
718 /* Persistent Commit */
719 #define CPUID_7_0_EBX_PCOMMIT (1U << 22)
720 /* Flush a Cache Line Optimized */
721 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
722 /* Cache Line Write Back */
723 #define CPUID_7_0_EBX_CLWB (1U << 24)
724 /* Intel Processor Trace */
725 #define CPUID_7_0_EBX_INTEL_PT (1U << 25)
726 /* AVX-512 Prefetch */
727 #define CPUID_7_0_EBX_AVX512PF (1U << 26)
728 /* AVX-512 Exponential and Reciprocal */
729 #define CPUID_7_0_EBX_AVX512ER (1U << 27)
730 /* AVX-512 Conflict Detection */
731 #define CPUID_7_0_EBX_AVX512CD (1U << 28)
732 /* SHA1/SHA256 Instruction Extensions */
733 #define CPUID_7_0_EBX_SHA_NI (1U << 29)
734 /* AVX-512 Byte and Word Instructions */
735 #define CPUID_7_0_EBX_AVX512BW (1U << 30)
736 /* AVX-512 Vector Length Extensions */
737 #define CPUID_7_0_EBX_AVX512VL (1U << 31)
738
739 /* AVX-512 Vector Byte Manipulation Instruction */
740 #define CPUID_7_0_ECX_AVX512_VBMI (1U << 1)
741 /* User-Mode Instruction Prevention */
742 #define CPUID_7_0_ECX_UMIP (1U << 2)
743 /* Protection Keys for User-mode Pages */
744 #define CPUID_7_0_ECX_PKU (1U << 3)
745 /* OS Enable Protection Keys */
746 #define CPUID_7_0_ECX_OSPKE (1U << 4)
747 /* UMONITOR/UMWAIT/TPAUSE Instructions */
748 #define CPUID_7_0_ECX_WAITPKG (1U << 5)
749 /* Additional AVX-512 Vector Byte Manipulation Instruction */
750 #define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6)
751 /* Galois Field New Instructions */
752 #define CPUID_7_0_ECX_GFNI (1U << 8)
753 /* Vector AES Instructions */
754 #define CPUID_7_0_ECX_VAES (1U << 9)
755 /* Carry-Less Multiplication Quadword */
756 #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
757 /* Vector Neural Network Instructions */
758 #define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
759 /* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
760 #define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
761 /* POPCNT for vectors of DW/QW */
762 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
763 /* 5-level Page Tables */
764 #define CPUID_7_0_ECX_LA57 (1U << 16)
765 /* Read Processor ID */
766 #define CPUID_7_0_ECX_RDPID (1U << 22)
767 /* Cache Line Demote Instruction */
768 #define CPUID_7_0_ECX_CLDEMOTE (1U << 25)
769 /* Move Doubleword as Direct Store Instruction */
770 #define CPUID_7_0_ECX_MOVDIRI (1U << 27)
771 /* Move 64 Bytes as Direct Store Instruction */
772 #define CPUID_7_0_ECX_MOVDIR64B (1U << 28)
773
774 /* AVX512 Neural Network Instructions */
775 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
776 /* AVX512 Multiply Accumulation Single Precision */
777 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
778 /* Fast Short Rep Mov */
779 #define CPUID_7_0_EDX_FSRM (1U << 4)
780 /* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
781 #define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
782 /* SERIALIZE instruction */
783 #define CPUID_7_0_EDX_SERIALIZE (1U << 14)
784 /* TSX Suspend Load Address Tracking instruction */
785 #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
786 /* Speculation Control */
787 #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
788 /* Single Thread Indirect Branch Predictors */
789 #define CPUID_7_0_EDX_STIBP (1U << 27)
790 /* Arch Capabilities */
791 #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
792 /* Core Capability */
793 #define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30)
794 /* Speculative Store Bypass Disable */
795 #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)
796
797 /* AVX512 BFloat16 Instruction */
798 #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
799
800 /* CLZERO instruction */
801 #define CPUID_8000_0008_EBX_CLZERO (1U << 0)
802 /* Always save/restore FP error pointers */
803 #define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2)
804 /* Write back and do not invalidate cache */
805 #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)
806 /* Indirect Branch Prediction Barrier */
807 #define CPUID_8000_0008_EBX_IBPB (1U << 12)
808 /* Single Thread Indirect Branch Predictors */
809 #define CPUID_8000_0008_EBX_STIBP (1U << 15)
810
811 #define CPUID_XSAVE_XSAVEOPT (1U << 0)
812 #define CPUID_XSAVE_XSAVEC (1U << 1)
813 #define CPUID_XSAVE_XGETBV1 (1U << 2)
814 #define CPUID_XSAVE_XSAVES (1U << 3)
815
816 #define CPUID_6_EAX_ARAT (1U << 2)
817
818 /* CPUID[0x80000007].EDX flags: */
819 #define CPUID_APM_INVTSC (1U << 8)
820
821 #define CPUID_VENDOR_SZ 12
822
823 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
824 #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
825 #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
826 #define CPUID_VENDOR_INTEL "GenuineIntel"
827
828 #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
829 #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
830 #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
831 #define CPUID_VENDOR_AMD "AuthenticAMD"
832
833 #define CPUID_VENDOR_VIA "CentaurHauls"
834
835 #define CPUID_VENDOR_HYGON "HygonGenuine"
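
/*
 * Each CPUID_VENDOR_*_{1,2,3} constant packs four ASCII characters
 * little-endian: e.g. CPUID_VENDOR_INTEL_1 == 0x756e6547 is the byte
 * sequence 0x47 0x65 0x6e 0x75 ("Genu") in memory, so the EBX/EDX/ECX
 * triple spells out "GenuineIntel".
 */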
836
837 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
838 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
839 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
840 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
841 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
842 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
843
844 #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
845 #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
846
847 /* CPUID[0xB].ECX level types */
848 #define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
849 #define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8)
850 #define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8)
851 #define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8)
852
853 /* MSR Feature Bits */
854 #define MSR_ARCH_CAP_RDCL_NO (1U << 0)
855 #define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
856 #define MSR_ARCH_CAP_RSBA (1U << 2)
857 #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
858 #define MSR_ARCH_CAP_SSB_NO (1U << 4)
859 #define MSR_ARCH_CAP_MDS_NO (1U << 5)
860 #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
861 #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
862 #define MSR_ARCH_CAP_TAA_NO (1U << 8)
863
864 #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
865
866 /* VMX MSR features */
867 #define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull
868 #define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32)
869 #define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32)
870 #define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49)
871 #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
872 #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)
873
874 #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
875 #define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
876 #define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6)
877 #define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7)
878 #define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8)
879 #define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull
880 #define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29)
881 #define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30)
882
883 #define MSR_VMX_EPT_EXECONLY (1ULL << 0)
884 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6)
885 #define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7)
886 #define MSR_VMX_EPT_UC (1ULL << 8)
887 #define MSR_VMX_EPT_WB (1ULL << 14)
888 #define MSR_VMX_EPT_2MB (1ULL << 16)
889 #define MSR_VMX_EPT_1GB (1ULL << 17)
890 #define MSR_VMX_EPT_INVEPT (1ULL << 20)
891 #define MSR_VMX_EPT_AD_BITS (1ULL << 21)
892 #define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22)
893 #define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25)
894 #define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26)
895 #define MSR_VMX_EPT_INVVPID (1ULL << 32)
896 #define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40)
897 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41)
898 #define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42)
899 #define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)
900
901 #define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0)
902
903
904 /* VMX controls */
905 #define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
906 #define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008
907 #define VMX_CPU_BASED_HLT_EXITING 0x00000080
908 #define VMX_CPU_BASED_INVLPG_EXITING 0x00000200
909 #define VMX_CPU_BASED_MWAIT_EXITING 0x00000400
910 #define VMX_CPU_BASED_RDPMC_EXITING 0x00000800
911 #define VMX_CPU_BASED_RDTSC_EXITING 0x00001000
912 #define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000
913 #define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000
914 #define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000
915 #define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000
916 #define VMX_CPU_BASED_TPR_SHADOW 0x00200000
917 #define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
918 #define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000
919 #define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000
920 #define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000
921 #define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
922 #define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000
923 #define VMX_CPU_BASED_MONITOR_EXITING 0x20000000
924 #define VMX_CPU_BASED_PAUSE_EXITING 0x40000000
925 #define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
926
927 #define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
928 #define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002
929 #define VMX_SECONDARY_EXEC_DESC 0x00000004
930 #define VMX_SECONDARY_EXEC_RDTSCP 0x00000008
931 #define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
932 #define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020
933 #define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040
934 #define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
935 #define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
936 #define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
937 #define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
938 #define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800
939 #define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
940 #define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
941 #define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000
942 #define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000
943 #define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000
944 #define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000
945 #define VMX_SECONDARY_EXEC_XSAVES 0x00100000
946
947 #define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001
948 #define VMX_PIN_BASED_NMI_EXITING 0x00000008
949 #define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020
950 #define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
951 #define VMX_PIN_BASED_POSTED_INTR 0x00000080
952
953 #define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
954 #define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
955 #define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
956 #define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
957 #define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000
958 #define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000
959 #define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000
960 #define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000
961 #define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
962 #define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000
963 #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
964 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
965
966 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
967 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200
968 #define VMX_VM_ENTRY_SMM 0x00000400
969 #define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
970 #define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
971 #define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000
972 #define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000
973 #define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000
974 #define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000
975 #define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
976
977 /* Supported Hyper-V Enlightenments */
978 #define HYPERV_FEAT_RELAXED 0
979 #define HYPERV_FEAT_VAPIC 1
980 #define HYPERV_FEAT_TIME 2
981 #define HYPERV_FEAT_CRASH 3
982 #define HYPERV_FEAT_RESET 4
983 #define HYPERV_FEAT_VPINDEX 5
984 #define HYPERV_FEAT_RUNTIME 6
985 #define HYPERV_FEAT_SYNIC 7
986 #define HYPERV_FEAT_STIMER 8
987 #define HYPERV_FEAT_FREQUENCIES 9
988 #define HYPERV_FEAT_REENLIGHTENMENT 10
989 #define HYPERV_FEAT_TLBFLUSH 11
990 #define HYPERV_FEAT_EVMCS 12
991 #define HYPERV_FEAT_IPI 13
992 #define HYPERV_FEAT_STIMER_DIRECT 14
993
994 #ifndef HYPERV_SPINLOCK_NEVER_RETRY
995 #define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
996 #endif
997
998 #define EXCP00_DIVZ 0
999 #define EXCP01_DB 1
1000 #define EXCP02_NMI 2
1001 #define EXCP03_INT3 3
1002 #define EXCP04_INTO 4
1003 #define EXCP05_BOUND 5
1004 #define EXCP06_ILLOP 6
1005 #define EXCP07_PREX 7
1006 #define EXCP08_DBLE 8
1007 #define EXCP09_XERR 9
1008 #define EXCP0A_TSS 10
1009 #define EXCP0B_NOSEG 11
1010 #define EXCP0C_STACK 12
1011 #define EXCP0D_GPF 13
1012 #define EXCP0E_PAGE 14
1013 #define EXCP10_COPR 16
1014 #define EXCP11_ALGN 17
1015 #define EXCP12_MCHK 18
1016
1017 #define EXCP_VMEXIT 0x100 /* only for system emulation */
1018 #define EXCP_SYSCALL 0x101 /* only for user emulation */
1019 #define EXCP_VSYSCALL 0x102 /* only for user emulation */
1020
1021 /* i386-specific interrupt pending bits. */
1022 #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
1023 #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
1024 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
1025 #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
1026 #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
1027 #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
1028 #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
1029
1030 /* Use a clearer name for this. */
1031 #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
1032
1033 /* Instead of computing the condition codes after each x86 instruction,
1034 * QEMU just stores one operand (called CC_SRC), the result
1035 * (called CC_DST) and the type of operation (called CC_OP). When the
1036 * condition codes are needed, the condition codes can be calculated
1037 * using this information. Condition codes are not generated if they
1038 * are only needed for conditional branches.
1039 */
1040 typedef enum {
1041 CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
1042 CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
1043
1044 CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
1045 CC_OP_MULW,
1046 CC_OP_MULL,
1047 CC_OP_MULQ,
1048
1049 CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1050 CC_OP_ADDW,
1051 CC_OP_ADDL,
1052 CC_OP_ADDQ,
1053
1054 CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1055 CC_OP_ADCW,
1056 CC_OP_ADCL,
1057 CC_OP_ADCQ,
1058
1059 CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1060 CC_OP_SUBW,
1061 CC_OP_SUBL,
1062 CC_OP_SUBQ,
1063
1064 CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
1065 CC_OP_SBBW,
1066 CC_OP_SBBL,
1067 CC_OP_SBBQ,
1068
1069 CC_OP_LOGICB, /* modify all flags, CC_DST = res */
1070 CC_OP_LOGICW,
1071 CC_OP_LOGICL,
1072 CC_OP_LOGICQ,
1073
1074 CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
1075 CC_OP_INCW,
1076 CC_OP_INCL,
1077 CC_OP_INCQ,
1078
1079 CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
1080 CC_OP_DECW,
1081 CC_OP_DECL,
1082 CC_OP_DECQ,
1083
1084 CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
1085 CC_OP_SHLW,
1086 CC_OP_SHLL,
1087 CC_OP_SHLQ,
1088
1089 CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
1090 CC_OP_SARW,
1091 CC_OP_SARL,
1092 CC_OP_SARQ,
1093
1094 CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
1095 CC_OP_BMILGW,
1096 CC_OP_BMILGL,
1097 CC_OP_BMILGQ,
1098
1099 CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
1100 CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
1101 CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
1102
1103 CC_OP_CLR, /* Z set, all other flags clear. */
1104 CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */
1105
1106 CC_OP_NB,
1107 } CCOp;
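
/*
 * Illustrative sketch of the lazy scheme described above, not one of
 * the real helpers (the name is hypothetical): for CC_OP_ADDL, Z and S
 * can be recovered from CC_DST alone when the flags are finally needed.
 */
static inline uint32_t lazy_zf_sf_addl_example(uint32_t cc_dst)
{
    uint32_t flags = 0;
    if (cc_dst == 0) {
        flags |= CC_Z;          /* result was zero */
    }
    if ((int32_t)cc_dst < 0) {
        flags |= CC_S;          /* sign bit of the 32-bit result */
    }
    return flags;
}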
1108
1109 typedef struct SegmentCache {
1110 uint32_t selector;
1111 target_ulong base;
1112 uint32_t limit;
1113 uint32_t flags;
1114 } SegmentCache;
1115
1116 #define MMREG_UNION(n, bits) \
1117 union n { \
1118 uint8_t _b_##n[(bits)/8]; \
1119 uint16_t _w_##n[(bits)/16]; \
1120 uint32_t _l_##n[(bits)/32]; \
1121 uint64_t _q_##n[(bits)/64]; \
1122 float32 _s_##n[(bits)/32]; \
1123 float64 _d_##n[(bits)/64]; \
1124 }
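
/*
 * For example, MMREG_UNION(MMXReg, 64) expands to a union with
 * _b_MMXReg[8], _w_MMXReg[4], _l_MMXReg[2], _q_MMXReg[1],
 * _s_MMXReg[2] and _d_MMXReg[1] members, which the MMX_* accessor
 * macros below index into.
 */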
1125
1126 typedef union {
1127 uint8_t _b[16];
1128 uint16_t _w[8];
1129 uint32_t _l[4];
1130 uint64_t _q[2];
1131 } XMMReg;
1132
1133 typedef union {
1134 uint8_t _b[32];
1135 uint16_t _w[16];
1136 uint32_t _l[8];
1137 uint64_t _q[4];
1138 } YMMReg;
1139
1140 typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
1141 typedef MMREG_UNION(MMXReg, 64) MMXReg;
1142
1143 typedef struct BNDReg {
1144 uint64_t lb;
1145 uint64_t ub;
1146 } BNDReg;
1147
1148 typedef struct BNDCSReg {
1149 uint64_t cfgu;
1150 uint64_t sts;
1151 } BNDCSReg;
1152
1153 #define BNDCFG_ENABLE 1ULL
1154 #define BNDCFG_BNDPRESERVE 2ULL
1155 #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
1156
1157 #ifdef HOST_WORDS_BIGENDIAN
1158 #define ZMM_B(n) _b_ZMMReg[63 - (n)]
1159 #define ZMM_W(n) _w_ZMMReg[31 - (n)]
1160 #define ZMM_L(n) _l_ZMMReg[15 - (n)]
1161 #define ZMM_S(n) _s_ZMMReg[15 - (n)]
1162 #define ZMM_Q(n) _q_ZMMReg[7 - (n)]
1163 #define ZMM_D(n) _d_ZMMReg[7 - (n)]
1164
1165 #define MMX_B(n) _b_MMXReg[7 - (n)]
1166 #define MMX_W(n) _w_MMXReg[3 - (n)]
1167 #define MMX_L(n) _l_MMXReg[1 - (n)]
1168 #define MMX_S(n) _s_MMXReg[1 - (n)]
1169 #else
1170 #define ZMM_B(n) _b_ZMMReg[n]
1171 #define ZMM_W(n) _w_ZMMReg[n]
1172 #define ZMM_L(n) _l_ZMMReg[n]
1173 #define ZMM_S(n) _s_ZMMReg[n]
1174 #define ZMM_Q(n) _q_ZMMReg[n]
1175 #define ZMM_D(n) _d_ZMMReg[n]
1176
1177 #define MMX_B(n) _b_MMXReg[n]
1178 #define MMX_W(n) _w_MMXReg[n]
1179 #define MMX_L(n) _l_MMXReg[n]
1180 #define MMX_S(n) _s_MMXReg[n]
1181 #endif
1182 #define MMX_Q(n) _q_MMXReg[n]
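
/*
 * On big-endian hosts the indices run backwards so that, for example,
 * ZMM_L(0) still names the least significant 32 bits of the register:
 * element numbering stays in guest order regardless of host byte order.
 */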
1183
1184 typedef union {
1185 floatx80 d __attribute__((aligned(16)));
1186 MMXReg mmx;
1187 } FPReg;
1188
1189 typedef struct {
1190 uint64_t base;
1191 uint64_t mask;
1192 } MTRRVar;
1193
1194 #define CPU_NB_REGS64 16
1195 #define CPU_NB_REGS32 8
1196
1197 #ifdef TARGET_X86_64
1198 #define CPU_NB_REGS CPU_NB_REGS64
1199 #else
1200 #define CPU_NB_REGS CPU_NB_REGS32
1201 #endif
1202
1203 #define MAX_FIXED_COUNTERS 3
1204 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
1205
1206 #define TARGET_INSN_START_EXTRA_WORDS 1
1207
1208 #define NB_OPMASK_REGS 8
1209
1210 /* A CPU can't have the APIC ID 0xFFFFFFFF, so use that value to indicate
1211  * that the APIC ID hasn't been set yet
1212  */
1213 #define UNASSIGNED_APIC_ID 0xFFFFFFFF
1214
1215 typedef union X86LegacyXSaveArea {
1216 struct {
1217 uint16_t fcw;
1218 uint16_t fsw;
1219 uint8_t ftw;
1220 uint8_t reserved;
1221 uint16_t fpop;
1222 uint64_t fpip;
1223 uint64_t fpdp;
1224 uint32_t mxcsr;
1225 uint32_t mxcsr_mask;
1226 FPReg fpregs[8];
1227 uint8_t xmm_regs[16][16];
1228 };
1229 uint8_t data[512];
1230 } X86LegacyXSaveArea;
1231
1232 typedef struct X86XSaveHeader {
1233 uint64_t xstate_bv;
1234 uint64_t xcomp_bv;
1235 uint64_t reserve0;
1236 uint8_t reserved[40];
1237 } X86XSaveHeader;
1238
1239 /* Ext. save area 2: AVX State */
1240 typedef struct XSaveAVX {
1241 uint8_t ymmh[16][16];
1242 } XSaveAVX;
1243
1244 /* Ext. save area 3: BNDREG */
1245 typedef struct XSaveBNDREG {
1246 BNDReg bnd_regs[4];
1247 } XSaveBNDREG;
1248
1249 /* Ext. save area 4: BNDCSR */
1250 typedef union XSaveBNDCSR {
1251 BNDCSReg bndcsr;
1252 uint8_t data[64];
1253 } XSaveBNDCSR;
1254
1255 /* Ext. save area 5: Opmask */
1256 typedef struct XSaveOpmask {
1257 uint64_t opmask_regs[NB_OPMASK_REGS];
1258 } XSaveOpmask;
1259
1260 /* Ext. save area 6: ZMM_Hi256 */
1261 typedef struct XSaveZMM_Hi256 {
1262 uint8_t zmm_hi256[16][32];
1263 } XSaveZMM_Hi256;
1264
1265 /* Ext. save area 7: Hi16_ZMM */
1266 typedef struct XSaveHi16_ZMM {
1267 uint8_t hi16_zmm[16][64];
1268 } XSaveHi16_ZMM;
1269
1270 /* Ext. save area 9: PKRU state */
1271 typedef struct XSavePKRU {
1272 uint32_t pkru;
1273 uint32_t padding;
1274 } XSavePKRU;
1275
1276 typedef struct X86XSaveArea {
1277 X86LegacyXSaveArea legacy;
1278 X86XSaveHeader header;
1279
1280 /* Extended save areas: */
1281
1282 /* AVX State: */
1283 XSaveAVX avx_state;
1284 uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
1285 /* MPX State: */
1286 XSaveBNDREG bndreg_state;
1287 XSaveBNDCSR bndcsr_state;
1288 /* AVX-512 State: */
1289 XSaveOpmask opmask_state;
1290 XSaveZMM_Hi256 zmm_hi256_state;
1291 XSaveHi16_ZMM hi16_zmm_state;
1292 /* PKRU State: */
1293 XSavePKRU pkru_state;
1294 } X86XSaveArea;
1295
1296 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
1297 QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
1298 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
1299 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
1300 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
1301 QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
1302 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
1303 QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
1304 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
1305 QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
1306 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
1307 QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
1308 QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
1309 QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
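
/*
 * These offsets match the standard (non-compacted) XSAVE layout: the
 * 512-byte legacy area plus the 64-byte header places AVX state at
 * 0x240, and the explicit padding puts the MPX, AVX-512 and PKRU areas
 * at the offsets that CPUID[EAX=0xD] enumerates on real hardware.
 */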
1310
1311 typedef enum TPRAccess {
1312 TPR_ACCESS_READ,
1313 TPR_ACCESS_WRITE,
1314 } TPRAccess;
1315
1316 /* Cache information data structures: */
1317
1318 enum CacheType {
1319 DATA_CACHE,
1320 INSTRUCTION_CACHE,
1321 UNIFIED_CACHE
1322 };
1323
1324 typedef struct CPUCacheInfo {
1325 enum CacheType type;
1326 uint8_t level;
1327 /* Size in bytes */
1328 uint32_t size;
1329 /* Line size, in bytes */
1330 uint16_t line_size;
1331 /*
1332 * Associativity.
1333 * Note: representation of fully-associative caches is not implemented
1334 */
1335 uint8_t associativity;
1336 /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
1337 uint8_t partitions;
1338 /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
1339 uint32_t sets;
1340 /*
1341 * Lines per tag.
1342 * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
1343 * (Is this synonym to @partitions?)
1344 */
1345 uint8_t lines_per_tag;
1346
1347 /* Self-initializing cache */
1348 bool self_init;
1349 /*
1350 * WBINVD/INVD is not guaranteed to act upon lower level caches of
1351 * non-originating threads sharing this cache.
1352 * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
1353 */
1354 bool no_invd_sharing;
1355 /*
1356 * Cache is inclusive of lower cache levels.
1357 * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
1358 */
1359 bool inclusive;
1360 /*
1361 * A complex function is used to index the cache, potentially using all
1362 * address bits. CPUID[4].EDX[bit 2].
1363 */
1364 bool complex_indexing;
1365 } CPUCacheInfo;
1366
1367
1368 typedef struct CPUCaches {
1369 CPUCacheInfo *l1d_cache;
1370 CPUCacheInfo *l1i_cache;
1371 CPUCacheInfo *l2_cache;
1372 CPUCacheInfo *l3_cache;
1373 } CPUCaches;
1374
1375 typedef struct HVFX86LazyFlags {
1376 target_ulong result;
1377 target_ulong auxbits;
1378 } HVFX86LazyFlags;
1379
1380 typedef struct CPUX86State {
1381 /* standard registers */
1382 target_ulong regs[CPU_NB_REGS];
1383 target_ulong eip;
1384 target_ulong eflags; /* eflags register. During CPU emulation, CC
1385 flags and DF are set to zero because they are
1386 stored elsewhere */
1387
1388 /* emulator internal eflags handling */
1389 target_ulong cc_dst;
1390 target_ulong cc_src;
1391 target_ulong cc_src2;
1392 uint32_t cc_op;
1393 int32_t df; /* direction flag: 1 if DF = 0 (increment), -1 if DF = 1 (decrement) */
1394 uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
1395 are known at translation time. */
1396 uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
1397
1398 /* segments */
1399 SegmentCache segs[6]; /* selector values */
1400 SegmentCache ldt;
1401 SegmentCache tr;
1402 SegmentCache gdt; /* only base and limit are used */
1403 SegmentCache idt; /* only base and limit are used */
1404
1405 target_ulong cr[5]; /* NOTE: cr1 is unused */
1406 int32_t a20_mask;
1407
1408 BNDReg bnd_regs[4];
1409 BNDCSReg bndcs_regs;
1410 uint64_t msr_bndcfgs;
1411 uint64_t efer;
1412
1413 /* Beginning of state preserved by INIT (dummy marker). */
1414 struct {} start_init_save;
1415
1416 /* FPU state */
1417 unsigned int fpstt; /* top of stack index */
1418 uint16_t fpus;
1419 uint16_t fpuc;
1420 uint8_t fptags[8]; /* 0 = valid, 1 = empty */
1421 FPReg fpregs[8];
1422 /* KVM-only so far */
1423 uint16_t fpop;
1424 uint64_t fpip;
1425 uint64_t fpdp;
1426
1427 /* emulator internal variables */
1428 float_status fp_status;
1429 floatx80 ft0;
1430
1431 float_status mmx_status; /* for 3DNow! float ops */
1432 float_status sse_status;
1433 uint32_t mxcsr;
1434 ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
1435 ZMMReg xmm_t0;
1436 MMXReg mmx_t0;
1437
1438 XMMReg ymmh_regs[CPU_NB_REGS];
1439
1440 uint64_t opmask_regs[NB_OPMASK_REGS];
1441 YMMReg zmmh_regs[CPU_NB_REGS];
1442 ZMMReg hi16_zmm_regs[CPU_NB_REGS];
1443
1444 /* sysenter registers */
1445 uint32_t sysenter_cs;
1446 target_ulong sysenter_esp;
1447 target_ulong sysenter_eip;
1448 uint64_t star;
1449
1450 uint64_t vm_hsave;
1451
1452 #ifdef TARGET_X86_64
1453 target_ulong lstar;
1454 target_ulong cstar;
1455 target_ulong fmask;
1456 target_ulong kernelgsbase;
1457 #endif
1458
1459 uint64_t tsc;
1460 uint64_t tsc_adjust;
1461 uint64_t tsc_deadline;
1462 uint64_t tsc_aux;
1463
1464 uint64_t xcr0;
1465
1466 uint64_t mcg_status;
1467 uint64_t msr_ia32_misc_enable;
1468 uint64_t msr_ia32_feature_control;
1469
1470 uint64_t msr_fixed_ctr_ctrl;
1471 uint64_t msr_global_ctrl;
1472 uint64_t msr_global_status;
1473 uint64_t msr_global_ovf_ctrl;
1474 uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
1475 uint64_t msr_gp_counters[MAX_GP_COUNTERS];
1476 uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
1477
1478 uint64_t pat;
1479 uint32_t smbase;
1480 uint64_t msr_smi_count;
1481
1482 uint32_t pkru;
1483 uint32_t tsx_ctrl;
1484
1485 uint64_t spec_ctrl;
1486 uint64_t virt_ssbd;
1487
1488 /* End of state preserved by INIT (dummy marker). */
1489 struct {} end_init_save;
1490
1491 uint64_t system_time_msr;
1492 uint64_t wall_clock_msr;
1493 uint64_t steal_time_msr;
1494 uint64_t async_pf_en_msr;
1495 uint64_t pv_eoi_en_msr;
1496 uint64_t poll_control_msr;
1497
1498 /* Partition-wide HV MSRs, will be updated only on the first vcpu */
1499 uint64_t msr_hv_hypercall;
1500 uint64_t msr_hv_guest_os_id;
1501 uint64_t msr_hv_tsc;
1502
1503 /* Per-VCPU HV MSRs */
1504 uint64_t msr_hv_vapic;
1505 uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
1506 uint64_t msr_hv_runtime;
1507 uint64_t msr_hv_synic_control;
1508 uint64_t msr_hv_synic_evt_page;
1509 uint64_t msr_hv_synic_msg_page;
1510 uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
1511 uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
1512 uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
1513 uint64_t msr_hv_reenlightenment_control;
1514 uint64_t msr_hv_tsc_emulation_control;
1515 uint64_t msr_hv_tsc_emulation_status;
1516
1517 uint64_t msr_rtit_ctrl;
1518 uint64_t msr_rtit_status;
1519 uint64_t msr_rtit_output_base;
1520 uint64_t msr_rtit_output_mask;
1521 uint64_t msr_rtit_cr3_match;
1522 uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
1523
1524 /* exception/interrupt handling */
1525 int error_code;
1526 int exception_is_int;
1527 target_ulong exception_next_eip;
1528 target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
1529 union {
1530 struct CPUBreakpoint *cpu_breakpoint[4];
1531 struct CPUWatchpoint *cpu_watchpoint[4];
1532 }; /* break/watchpoints for dr[0..3] */
1533 int old_exception; /* exception in flight */
1534
1535 uint64_t vm_vmcb;
1536 uint64_t tsc_offset;
1537 uint64_t intercept;
1538 uint16_t intercept_cr_read;
1539 uint16_t intercept_cr_write;
1540 uint16_t intercept_dr_read;
1541 uint16_t intercept_dr_write;
1542 uint32_t intercept_exceptions;
1543 uint64_t nested_cr3;
1544 uint32_t nested_pg_mode;
1545 uint8_t v_tpr;
1546
1547 /* KVM states, automatically cleared on reset */
1548 uint8_t nmi_injected;
1549 uint8_t nmi_pending;
1550
1551 uintptr_t retaddr;
1552
1553 /* Fields up to this point are cleared by a CPU reset */
1554 struct {} end_reset_fields;
1555
1556 /* Fields after this point are preserved across CPU reset. */
1557
1558 /* processor features (e.g. for CPUID insn) */
1559 /* Actual cpuid leaf 7 value */
1560 uint32_t cpuid_level_func7;
1561 /* Minimum cpuid leaf 7 value */
1562 uint32_t cpuid_min_level_func7;
1563 /* Minimum level/xlevel/xlevel2, based on CPU model + features */
1564 uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
1565 /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
1566 uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
1567 /* Actual level/xlevel/xlevel2 value: */
1568 uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
1569 uint32_t cpuid_vendor1;
1570 uint32_t cpuid_vendor2;
1571 uint32_t cpuid_vendor3;
1572 uint32_t cpuid_version;
1573 FeatureWordArray features;
1574 /* Features that were explicitly enabled/disabled */
1575 FeatureWordArray user_features;
1576 uint32_t cpuid_model[12];
1577 /* Cache information for CPUID. When legacy-cache=on, the cache data
1578 * on each CPUID leaf will be different, because we keep compatibility
1579 * with old QEMU versions.
1580 */
1581 CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
1582
1583 /* MTRRs */
1584 uint64_t mtrr_fixed[11];
1585 uint64_t mtrr_deftype;
1586 MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
1587
1588 /* For KVM */
1589 uint32_t mp_state;
1590 int32_t exception_nr;
1591 int32_t interrupt_injected;
1592 uint8_t soft_interrupt;
1593 uint8_t exception_pending;
1594 uint8_t exception_injected;
1595 uint8_t has_error_code;
1596 uint8_t exception_has_payload;
1597 uint64_t exception_payload;
1598 uint32_t ins_len;
1599 uint32_t sipi_vector;
1600 bool tsc_valid;
1601 int64_t tsc_khz;
1602 int64_t user_tsc_khz; /* for sanity check only */
1603 uint64_t apic_bus_freq;
1604 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
1605 void *xsave_buf;
1606 #endif
1607 #if defined(CONFIG_KVM)
1608 struct kvm_nested_state *nested_state;
1609 #endif
1610 #if defined(CONFIG_HVF)
1611 HVFX86LazyFlags hvf_lflags;
1612 void *hvf_mmio_buf;
1613 #endif
1614
1615 uint64_t mcg_cap;
1616 uint64_t mcg_ctl;
1617 uint64_t mcg_ext_ctl;
1618 uint64_t mce_banks[MCE_BANKS_DEF*4];
1619 uint64_t xstate_bv;
1620
1621 /* vmstate */
1622 uint16_t fpus_vmstate;
1623 uint16_t fptag_vmstate;
1624 uint16_t fpregs_format_vmstate;
1625
1626 uint64_t xss;
1627 uint32_t umwait;
1628
1629 TPRAccess tpr_access_type;
1630
1631 unsigned nr_dies;
1632 unsigned nr_nodes;
1633 unsigned pkg_offset;
1634 } CPUX86State;
1635
1636 struct kvm_msrs;
1637
1638 /**
1639 * X86CPU:
1640 * @env: #CPUX86State
1641 * @migratable: If set, only migratable flags will be accepted when "enforce"
1642 * mode is used, and only migratable flags will be included in the "host"
1643 * CPU model.
1644 *
1645 * An x86 CPU.
1646 */
1647 struct X86CPU {
1648 /*< private >*/
1649 CPUState parent_obj;
1650 /*< public >*/
1651
1652 CPUNegativeOffsetState neg;
1653 CPUX86State env;
1654 VMChangeStateEntry *vmsentry;
1655
1656 uint64_t ucode_rev;
1657
1658 uint32_t hyperv_spinlock_attempts;
1659 char *hyperv_vendor_id;
1660 bool hyperv_synic_kvm_only;
1661 uint64_t hyperv_features;
1662 bool hyperv_passthrough;
1663 OnOffAuto hyperv_no_nonarch_cs;
1664
1665 bool check_cpuid;
1666 bool enforce_cpuid;
1667 /*
1668 * Force features to be enabled even if the host doesn't support them.
1669 * This is dangerous and should be done only for testing CPUID
1670 * compatibility.
1671 */
1672 bool force_features;
1673 bool expose_kvm;
1674 bool expose_tcg;
1675 bool migratable;
1676 bool migrate_smi_count;
1677 bool max_features; /* Enable all supported features automatically */
1678 uint32_t apic_id;
1679
1680 /* Enables publishing of TSC increment and Local APIC bus frequencies to
1681 * the guest OS in CPUID leaf 0x40000010, the same way that VMware does. */
1682 bool vmware_cpuid_freq;
1683
1684 /* if true, the CPUID code directly forwards host cache leaves to the guest */
1685 bool cache_info_passthrough;
1686
1687 /* if true the CPUID code directly forwards
1688 * host monitor/mwait leaves to the guest */
1689 struct {
1690 uint32_t eax;
1691 uint32_t ebx;
1692 uint32_t ecx;
1693 uint32_t edx;
1694 } mwait;
1695
1696 /* Features that were filtered out because of missing host capabilities */
1697 FeatureWordArray filtered_features;
1698
1699 /* Enable PMU CPUID bits. This can't be enabled by default yet because
1700 * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
1701 * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
1702 * capabilities) directly to the guest.
1703 */
1704 bool enable_pmu;
1705
1706 /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
1707 * disabled by default to avoid breaking migration between QEMU with
1708 * different LMCE configurations.
1709 */
1710 bool enable_lmce;
1711
1712 /* Compatibility bits for old machine types.
1713 * If true, present a virtual L3 cache to the VM; the vCPUs in the same
1714 * virtual socket share one virtual L3 cache.
1715 */
1716 bool enable_l3_cache;
1717
1718 /* Compatibility bits for old machine types.
1719 * If true, present the old cache topology information.
1720 */
1721 bool legacy_cache;
1722
1723 /* Compatibility bits for old machine types: */
1724 bool enable_cpuid_0xb;
1725
1726 /* Enable auto level-increase for all CPUID leaves */
1727 bool full_cpuid_auto_level;
1728
1729 /* Enable auto level-increase for Intel Processor Trace leaf */
1730 bool intel_pt_auto_level;
1731
1732 /* if true, fill the top bits of the MTRR_PHYSMASKn variable-range registers */
1733 bool fill_mtrr_mask;
1734
1735 /* if true, override the phys_bits value with a value read from the host */
1736 bool host_phys_bits;
1737
1738 /* if set, limit maximum value for phys_bits when host_phys_bits is true */
1739 uint8_t host_phys_bits_limit;
1740
1741 /* Stop SMI delivery for migration compatibility with old machines */
1742 bool kvm_no_smi_migration;
1743
1744 /* Number of physical address bits supported */
1745 uint32_t phys_bits;
1746
1747 /* in order to simplify APIC support, we leave this pointer to the
1748 user */
1749 struct DeviceState *apic_state;
1750 struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
1751 Notifier machine_done;
1752
1753 struct kvm_msrs *kvm_msr_buf;
1754
1755 int32_t node_id; /* NUMA node this CPU belongs to */
1756 int32_t socket_id;
1757 int32_t die_id;
1758 int32_t core_id;
1759 int32_t thread_id;
1760
1761 int32_t hv_max_vps;
1762 };
1763
1764
1765 #ifndef CONFIG_USER_ONLY
1766 extern VMStateDescription vmstate_x86_cpu;
1767 #endif
1768
1769 /**
1770 * x86_cpu_do_interrupt:
1771 * @cpu: vCPU the interrupt is to be handled by.
1772 */
1773 void x86_cpu_do_interrupt(CPUState *cpu);
1774 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
1775 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
1776
1777 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
1778 int cpuid, void *opaque);
1779 int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
1780 int cpuid, void *opaque);
1781 int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
1782 void *opaque);
1783 int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
1784 void *opaque);
1785
1786 void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
1787 Error **errp);
1788
1789 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
1790
1791 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
1792 MemTxAttrs *attrs);
1793
1794 int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
1795 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
1796
1797 void x86_cpu_exec_enter(CPUState *cpu);
1798 void x86_cpu_exec_exit(CPUState *cpu);
1799
1800 void x86_cpu_list(void);
1801 int cpu_x86_support_mca_broadcast(CPUX86State *env);
1802
1803 int cpu_get_pic_interrupt(CPUX86State *s);
1804 /* MSDOS compatibility mode FPU exception support */
1805 void x86_register_ferr_irq(qemu_irq irq);
1806 void cpu_set_ignne(void);
1807 /* mpx_helper.c */
1808 void cpu_sync_bndcs_hflags(CPUX86State *env);
1809
1810 /* this function must always be used to load data in the segment
1811 cache: it synchronizes the hflags with the segment cache values */
1812 static inline void cpu_x86_load_seg_cache(CPUX86State *env,
1813 int seg_reg, unsigned int selector,
1814 target_ulong base,
1815 unsigned int limit,
1816 unsigned int flags)
1817 {
1818 SegmentCache *sc;
1819 unsigned int new_hflags;
1820
1821 sc = &env->segs[seg_reg];
1822 sc->selector = selector;
1823 sc->base = base;
1824 sc->limit = limit;
1825 sc->flags = flags;
1826
1827 /* update the hidden flags */
1828 {
1829 if (seg_reg == R_CS) {
1830 #ifdef TARGET_X86_64
1831 if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
1832 /* long mode */
1833 env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
1834 env->hflags &= ~(HF_ADDSEG_MASK);
1835 } else
1836 #endif
1837 {
1838 /* legacy / compatibility case */
1839 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
1840 >> (DESC_B_SHIFT - HF_CS32_SHIFT);
1841 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
1842 new_hflags;
1843 }
1844 }
1845 if (seg_reg == R_SS) {
1846 int cpl = (flags >> DESC_DPL_SHIFT) & 3;
1847 #if HF_CPL_MASK != 3
1848 #error HF_CPL_MASK is hardcoded
1849 #endif
1850 env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
1851 /* Possibly switch between BNDCFGS and BNDCFGU */
1852 cpu_sync_bndcs_hflags(env);
1853 }
1854 new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
1855 >> (DESC_B_SHIFT - HF_SS32_SHIFT);
1856 if (env->hflags & HF_CS64_MASK) {
1857 /* zero base assumed for DS, ES and SS in long mode */
1858 } else if (!(env->cr[0] & CR0_PE_MASK) ||
1859 (env->eflags & VM_MASK) ||
1860 !(env->hflags & HF_CS32_MASK)) {
1861 /* XXX: try to avoid this test. The problem comes from the
1862 fact that in real mode or vm86 mode we only modify the
1863 'base' and 'selector' fields of the segment cache to go
1864 faster. A solution may be to force addseg to one in
1865 translate-i386.c. */
1866 new_hflags |= HF_ADDSEG_MASK;
1867 } else {
1868 new_hflags |= ((env->segs[R_DS].base |
1869 env->segs[R_ES].base |
1870 env->segs[R_SS].base) != 0) <<
1871 HF_ADDSEG_SHIFT;
1872 }
1873 env->hflags = (env->hflags &
1874 ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
1875 }
1876 }
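/*
 * Example (an illustrative sketch, not taken from this header): a
 * vm86/real-mode style segment load, where the cached base is simply
 * selector << 4 and the 16-bit limit/flags are left at their defaults;
 * the selector value 0x1234 here is made up:
 *
 *     cpu_x86_load_seg_cache(env, R_DS, 0x1234,
 *                            (target_ulong)0x1234 << 4, 0xffff, 0);
 */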
1877
1878 static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
1879 uint8_t sipi_vector)
1880 {
1881 CPUState *cs = CPU(cpu);
1882 CPUX86State *env = &cpu->env;
1883
1884 env->eip = 0;
1885 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
1886 sipi_vector << 12,
1887 env->segs[R_CS].limit,
1888 env->segs[R_CS].flags);
1889 cs->halted = 0;
1890 }
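/*
 * Worked example: for sipi_vector 0x9a this loads CS with selector
 * 0x9a00 and base 0x9a000, so with eip reset to 0 the AP starts
 * executing at physical address 0x9a000 (vector * 4 KiB), as the
 * INIT-SIPI-SIPI protocol requires.
 */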
1891
1892 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1893 target_ulong *base, unsigned int *limit,
1894 unsigned int *flags);
1895
1896 /* op_helper.c */
1897 /* used for debug or cpu save/restore */
1898
1899 /* cpu-exec.c */
1900 /* The following helpers are only usable in user-mode simulation, as
1901 they can trigger unexpected exceptions. */
1902 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
1903 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
1904 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
1905 void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
1906 void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
1907
1908 /* You can call this signal handler from your SIGBUS and SIGSEGV
1909 signal handlers to inform the virtual CPU of exceptions. A non-zero
1910 value is returned if the signal was handled by the virtual CPU. */
1911 int cpu_x86_signal_handler(int host_signum, void *pinfo,
1912 void *puc);
1913
1914 /* cpu.c */
1915 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1916 uint32_t *eax, uint32_t *ebx,
1917 uint32_t *ecx, uint32_t *edx);
1918 void cpu_clear_apic_feature(CPUX86State *env);
1919 void host_cpuid(uint32_t function, uint32_t count,
1920 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
1921 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
1922 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type);
1923
1924 /* helper.c */
1925 bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1926 MMUAccessType access_type, int mmu_idx,
1927 bool probe, uintptr_t retaddr);
1928 void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
1929
1930 #ifndef CONFIG_USER_ONLY
1931 static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
1932 {
1933 return !!attrs.secure;
1934 }
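/*
 * I.e. accesses flagged secure (made while in SMM) select address space
 * 1, where SMRAM is mapped; all other accesses use address space 0.
 */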
1935
1936 static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
1937 {
1938 return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
1939 }
1940
1941 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
1942 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
1943 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
1944 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
1945 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
1946 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
1947 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
1948 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
1949 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
1950 #endif
1951
1952 void breakpoint_handler(CPUState *cs);
1953
1954 /* these will eventually be removed */
1955 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
1956 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
1957 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
1958 void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
1959
1960 /* hw/pc.c */
1961 uint64_t cpu_get_tsc(CPUX86State *env);
1962
1963 /* XXX: This value should match the one returned by CPUID
1964 * and the one used in exec.c */
1965 # if defined(TARGET_X86_64)
1966 # define TCG_PHYS_ADDR_BITS 40
1967 # else
1968 # define TCG_PHYS_ADDR_BITS 36
1969 # endif
1970
1971 #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)
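/*
 * E.g. with TARGET_X86_64 this is the 40-bit mask 0xffffffffff; the
 * 32-bit build gets the 36-bit PAE-style mask 0xfffffffff.
 */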
1972
1973 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
1974 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
1975 #define CPU_RESOLVING_TYPE TYPE_X86_CPU
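/*
 * For instance, assuming TYPE_X86_CPU expands to "x86_64-cpu" (as
 * cpu-qom.h defines it for 64-bit targets), X86_CPU_TYPE_NAME("qemu64")
 * yields the QOM type name "qemu64-x86_64-cpu".
 */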
1976
1977 #ifdef TARGET_X86_64
1978 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
1979 #else
1980 #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
1981 #endif
1982
1983 #define cpu_signal_handler cpu_x86_signal_handler
1984 #define cpu_list x86_cpu_list
1985
1986 /* MMU modes definitions */
1987 #define MMU_KSMAP_IDX 0
1988 #define MMU_USER_IDX 1
1989 #define MMU_KNOSMAP_IDX 2
1990 static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
1991 {
1992 return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
1993 (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
1994 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
1995 }
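/*
 * In other words: ring-3 code always gets MMU_USER_IDX; ring-0 code
 * gets MMU_KNOSMAP_IDX when SMAP is disabled or EFLAGS.AC is set (user
 * pages accessible) and MMU_KSMAP_IDX otherwise (user pages blocked by
 * SMAP).
 */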
1996
1997 static inline int cpu_mmu_index_kernel(CPUX86State *env)
1998 {
1999 return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
2000 ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
2001 ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
2002 }
2003
2004 #define CC_DST (env->cc_dst)
2005 #define CC_SRC (env->cc_src)
2006 #define CC_SRC2 (env->cc_src2)
2007 #define CC_OP (env->cc_op)
2008
2009 /* n must be a compile-time constant for this to be efficient */
2010 static inline target_long lshift(target_long x, int n)
2011 {
2012 if (n >= 0) {
2013 return x << n;
2014 } else {
2015 return x >> (-n);
2016 }
2017 }
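/*
 * For example, lshift(x, 3) == x << 3 while lshift(x, -3) == x >> 3
 * (an arithmetic shift on the usual compilers, target_long being
 * signed), so callers can encode the shift direction in the sign of n.
 */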
2018
2019 /* float macros */
2020 #define FT0 (env->ft0)
2021 #define ST0 (env->fpregs[env->fpstt].d)
2022 #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
2023 #define ST1 ST(1)
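/*
 * The x87 stack is fpregs[] indexed modulo 8 relative to the
 * top-of-stack pointer fpstt; e.g. with fpstt == 6, ST(1) resolves to
 * fpregs[(6 + 1) & 7].d, i.e. fpregs[7].d.
 */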
2024
2025 /* translate.c */
2026 void tcg_x86_init(void);
2027
2028 typedef CPUX86State CPUArchState;
2029 typedef X86CPU ArchCPU;
2030
2031 #include "exec/cpu-all.h"
2032 #include "svm.h"
2033
2034 #if !defined(CONFIG_USER_ONLY)
2035 #include "hw/i386/apic.h"
2036 #endif
2037
2038 static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
2039 target_ulong *cs_base, uint32_t *flags)
2040 {
2041 *cs_base = env->segs[R_CS].base;
2042 *pc = *cs_base + env->eip;
2043 *flags = env->hflags |
2044 (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
2045 }
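/*
 * Example: with CS.base 0x10000 and eip 0x400 the translation block is
 * looked up at pc = 0x10400; flags combines the hflags with the EFLAGS
 * bits that influence code generation.
 */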
2046
2047 void do_cpu_init(X86CPU *cpu);
2048 void do_cpu_sipi(X86CPU *cpu);
2049
2050 #define MCE_INJECT_BROADCAST 1
2051 #define MCE_INJECT_UNCOND_AO 2
2052
2053 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
2054 uint64_t status, uint64_t mcg_status, uint64_t addr,
2055 uint64_t misc, int flags);
2056
2057 /* excp_helper.c */
2058 void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
2059 void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
2060 uintptr_t retaddr);
2061 void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
2062 int error_code);
2063 void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
2064 int error_code, uintptr_t retaddr);
2065 void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
2066 int error_code, int next_eip_addend);
2067
2068 /* cc_helper.c */
2069 extern const uint8_t parity_table[256];
2070 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
2071
2072 static inline uint32_t cpu_compute_eflags(CPUX86State *env)
2073 {
2074 uint32_t eflags = env->eflags;
2075 if (tcg_enabled()) {
2076 eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
2077 }
2078 return eflags;
2079 }
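/*
 * Under TCG the arithmetic flags live lazily in CC_SRC/CC_DST/CC_OP
 * rather than in env->eflags, so they must be folded back in here; with
 * hardware accelerators env->eflags is already complete.
 */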
2080
2081 /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
2082 * after generating a call to a helper that uses this.
2083 */
2084 static inline void cpu_load_eflags(CPUX86State *env, int eflags,
2085 int update_mask)
2086 {
2087 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
2088 CC_OP = CC_OP_EFLAGS;
2089 env->df = 1 - (2 * ((eflags >> 10) & 1));
2090 env->eflags = (env->eflags & ~update_mask) |
2091 (eflags & update_mask) | 0x2;
2092 }
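/*
 * Worked example: DF is bit 10 of EFLAGS, so (eflags >> 10) & 1 is 1
 * when DF is set, giving env->df = 1 - 2*1 = -1 (string ops decrement);
 * with DF clear, env->df = 1 - 2*0 = +1 (string ops increment).
 */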
2093
2094 /* Load EFER and update the corresponding hflags. XXX: do consistency
2095 checks with CPUID bits? */
2096 static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
2097 {
2098 env->efer = val;
2099 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
2100 if (env->efer & MSR_EFER_LMA) {
2101 env->hflags |= HF_LMA_MASK;
2102 }
2103 if (env->efer & MSR_EFER_SVME) {
2104 env->hflags |= HF_SVME_MASK;
2105 }
2106 }
2107
2108 static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
2109 {
2110 return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
2111 }
2112
2113 static inline int32_t x86_get_a20_mask(CPUX86State *env)
2114 {
2115 if (env->hflags & HF_SMM_MASK) {
2116 return -1;
2117 } else {
2118 return env->a20_mask;
2119 }
2120 }
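/*
 * Example: with the A20 line disabled, env->a20_mask is ~(1 << 20), so
 * ANDing a physical address with it wraps accesses at 1 MiB like a
 * real 8086-compatible machine; in SMM the mask is forced to -1 (all
 * address bits usable).
 */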
2121
2122 static inline bool cpu_has_vmx(CPUX86State *env)
2123 {
2124 return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
2125 }
2126
2127 static inline bool cpu_has_svm(CPUX86State *env)
2128 {
2129 return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
2130 }
2131
2132 /*
2133 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
2134 * Once set, CR4.VMXE must remain set as long as the vCPU is in
2135 * VMX operation. This is because CR4.VMXE is one of the bits set
2136 * in MSR_IA32_VMX_CR4_FIXED1.
2137 *
2138 * There is one exception to the above statement: when a vCPU enters
2139 * SMM mode, it temporarily exits VMX operation and may also clear
2140 * CR4.VMXE while executing in SMM mode.
2141 * When the vCPU exits SMM mode, its state is restored to VMX operation
2142 * and CR4.VMXE is restored to its original (set) value.
2143 *
2144 * Therefore, when the vCPU is not in SMM mode, we can infer whether
2145 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
2146 * know for certain.
2147 */
2148 static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
2149 {
2150 return cpu_has_vmx(env) &&
2151 ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
2152 }
2153
2154 /* fpu_helper.c */
2155 void update_fp_status(CPUX86State *env);
2156 void update_mxcsr_status(CPUX86State *env);
2157 void update_mxcsr_from_sse_status(CPUX86State *env);
2158
2159 static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
2160 {
2161 env->mxcsr = mxcsr;
2162 if (tcg_enabled()) {
2163 update_mxcsr_status(env);
2164 }
2165 }
2166
2167 static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
2168 {
2169 env->fpuc = fpuc;
2170 if (tcg_enabled()) {
2171 update_fp_status(env);
2172 }
2173 }
2174
2175 /* mem_helper.c */
2176 void helper_lock_init(void);
2177
2178 /* svm_helper.c */
2179 void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
2180 uint64_t param, uintptr_t retaddr);
2181 void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
2182 uint64_t exit_info_1, uintptr_t retaddr);
2183 void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
2184
2185 /* seg_helper.c */
2186 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
2187
2188 /* smm_helper.c */
2189 void do_smm_enter(X86CPU *cpu);
2190
2191 /* apic.c */
2192 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
2193 void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
2194 TPRAccess access);
2195
2196
2197 /* Change the value of a KVM-specific default
2198 *
2199 * If value is NULL, no default will be set and the original
2200 * value from the CPU model table will be kept.
2201 *
2202 * It is valid to call this function only for properties that
2203 * are already present in the kvm_default_props table.
2204 */
2205 void x86_cpu_change_kvm_default(const char *prop, const char *value);
2206
2207 /* Special values for X86CPUVersion: */
2208
2209 /* Resolve to latest CPU version */
2210 #define CPU_VERSION_LATEST -1
2211
2212 /*
2213 * Resolve to version defined by current machine type.
2214 * See x86_cpu_set_default_version()
2215 */
2216 #define CPU_VERSION_AUTO -2
2217
2218 /* Don't resolve to any versioned CPU models, like old QEMU versions */
2219 #define CPU_VERSION_LEGACY 0
2220
2221 typedef int X86CPUVersion;
2222
2223 /*
2224 * Set default CPU model version for CPU models having
2225 * version == CPU_VERSION_AUTO.
2226 */
2227 void x86_cpu_set_default_version(X86CPUVersion version);
2228
2229 /* Return name of 32-bit register, from a R_* constant */
2230 const char *get_register_name_32(unsigned int reg);
2231
2232 void enable_compat_apic_id_mode(void);
2233
2234 #define APIC_DEFAULT_ADDRESS 0xfee00000
2235 #define APIC_SPACE_SIZE 0x100000
2236
2237 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
2238
2239 /* cpu.c */
2240 bool cpu_is_bsp(X86CPU *cpu);
2241
2242 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
2243 void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
2244 void x86_update_hflags(CPUX86State* env);
2245
2246 static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
2247 {
2248 return !!(cpu->hyperv_features & BIT(feat));
2249 }
2250
2251 #if defined(TARGET_X86_64) && \
2252 defined(CONFIG_USER_ONLY) && \
2253 defined(CONFIG_LINUX)
2254 # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
2255 #endif
2256
2257 #endif /* I386_CPU_H */