exec: Make ldl_*_phys input an AddressSpace
[qemu.git] / target-i386 / helper.c
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "cpu.h"
21 #include "sysemu/kvm.h"
22 #ifndef CONFIG_USER_ONLY
23 #include "sysemu/sysemu.h"
24 #include "monitor/monitor.h"
25 #endif
26
27 //#define DEBUG_MMU
28
29 static void cpu_x86_version(CPUX86State *env, int *family, int *model)
30 {
31 int cpuver = env->cpuid_version;
32
33 if (family == NULL || model == NULL) {
34 return;
35 }
36
37 *family = (cpuver >> 8) & 0x0f;
38 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
39 }
40
41 /* Broadcast MCA signal for processor version 06H_EH and above */
42 int cpu_x86_support_mca_broadcast(CPUX86State *env)
43 {
44 int family = 0;
45 int model = 0;
46
47 cpu_x86_version(env, &family, &model);
48 if ((family == 6 && model >= 14) || family > 6) {
49 return 1;
50 }
51
52 return 0;
53 }
54
55 /***********************************************************/
56 /* x86 debug */
57
/* Human-readable names for the lazy condition-code operations, used by
 * the CPU state dump below.  NOTE: the entry order must exactly match
 * the CC_OP_* enum (CC_OP_NB entries) — it is indexed by env->cc_op. */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
123
/* Print one cached segment descriptor (selector, base, limit, flags)
 * followed by a decoded view of its attribute bits.  Attribute decoding
 * is skipped in real mode or when the descriptor is not present. */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: bases are full 64-bit values */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* No attribute decode for real-mode or non-present descriptors */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code/data segment: print type, access and attribute letters */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system segment: look up the type name, table selected by LMA */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
180
#define DUMP_CODE_BYTES_TOTAL 50
#define DUMP_CODE_BYTES_BACKWARD 20

/* Dump the full architectural CPU state (registers, segments, control
 * and debug registers, and optionally condition codes, FPU/SSE state
 * and a disassembly-style byte dump around EIP) to stream @f.
 * @flags selects the optional sections (CPU_DUMP_CCOP/FPU/CODE). */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* fold the lazy condition codes into a complete EFLAGS value */
    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: full-width GPRs including R8-R15 */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        /* 32-bit (or 16-bit) mode: truncate registers to 32 bits */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* segment registers, LDT and TR */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

    /* descriptor tables, control and debug registers */
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        /* show the lazy condition-code machinery state */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* FTW is stored inverted (fptags[i] == 0 means valid) */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        /* x87 stack registers, two per output line */
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* dump code bytes around EIP, marking the current byte with <> */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                /* unreadable guest memory */
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
377
378 /***********************************************************/
379 /* x86 mmu */
380 /* XXX: add PGE support */
381
382 void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
383 {
384 CPUX86State *env = &cpu->env;
385
386 a20_state = (a20_state != 0);
387 if (a20_state != ((env->a20_mask >> 20) & 1)) {
388 #if defined(DEBUG_MMU)
389 printf("A20 update: a20=%d\n", a20_state);
390 #endif
391 /* if the cpu is currently executing code, we must unlink it and
392 all the potentially executing TB */
393 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);
394
395 /* when a20 is changed, all the MMU mappings are invalid, so
396 we must flush everything */
397 tlb_flush(env, 1);
398 env->a20_mask = ~(1 << 20) | (a20_state << 20);
399 }
400 }
401
/* Install a new CR0 value, flushing the TLB when translation-relevant
 * bits change and keeping env->efer/env->hflags in sync with long-mode
 * entry/exit and the FPU control bits. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* PG/WP/PE all affect address translation or protection checks */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: the shift lines CR0.MP/EM/TS (bits 1..3) up
       with HF_MP/HF_EM/HF_TS in hflags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
442
443 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
444 the PDPT */
445 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
446 {
447 env->cr[3] = new_cr3;
448 if (env->cr[0] & CR0_PG_MASK) {
449 #if defined(DEBUG_MMU)
450 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
451 #endif
452 tlb_flush(env, 0);
453 }
454 }
455
456 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
457 {
458 #if defined(DEBUG_MMU)
459 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
460 #endif
461 if ((new_cr4 ^ env->cr[4]) &
462 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
463 CR4_SMEP_MASK | CR4_SMAP_MASK)) {
464 tlb_flush(env, 1);
465 }
466 /* SSE handling */
467 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
468 new_cr4 &= ~CR4_OSFXSR_MASK;
469 }
470 env->hflags &= ~HF_OSFXSR_MASK;
471 if (new_cr4 & CR4_OSFXSR_MASK) {
472 env->hflags |= HF_OSFXSR_MASK;
473 }
474
475 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
476 new_cr4 &= ~CR4_SMAP_MASK;
477 }
478 env->hflags &= ~HF_SMAP_MASK;
479 if (new_cr4 & CR4_SMAP_MASK) {
480 env->hflags |= HF_SMAP_MASK;
481 }
482
483 env->cr[4] = new_cr4;
484 }
485
486 #if defined(CONFIG_USER_ONLY)
487
488 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
489 int is_write, int mmu_idx)
490 {
491 /* user mode only emulation */
492 is_write &= 1;
493 env->cr[2] = addr;
494 env->error_code = (is_write << PG_ERROR_W_BIT);
495 env->error_code |= PG_ERROR_U_MASK;
496 env->exception_index = EXCP0E_PAGE;
497 return 1;
498 }
499
500 #else
501
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0 = nothing more to do
   1 = generate PF fault
*/
/* Softmmu page walk for the i386 MMU.  Handles three layouts: paging
 * disabled (identity), PAE/long mode (4-level or 3-level, 64-bit
 * entries), and legacy 32-bit 2-level paging.  On success the mapping
 * is installed in the TLB; on failure the #PF error code is built and
 * env->cr[2]/error_code/exception_index are set up.
 * @is_write1: 0 = read, 1 = write, 2 = instruction fetch.
 * NOTE(review): this file is mid-transition to AddressSpace-based
 * physical accessors — ldl_phys() already takes cs->as while
 * ldq_phys()/stl_phys_notdirty()/stq_phys() still take a bare address
 * here; confirm against the headers of this tree before changing. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    /* Paging disabled: linear address == physical address */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (and long-mode) walk: 64-bit entries, NX support.
           ptep accumulates the combined NX/USER/RW permissions of all
           levels, with PG_NX_MASK kept inverted along the way. */
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                /* NX bit set while EFER.NXE is off: reserved-bit fault */
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, no NX/A bits at this level */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                /* instruction fetch from a no-execute page */
                goto do_fault_protect;
            }
            /* protection checks per MMU mode (user / kernel / kernel+SMAP) */
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit 2-level paging: 32-bit entries, no NX */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code: W, U and I/D bits */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* SVM #PF intercept: cr2 is not modified in case of exceptions,
           the faulting address goes to the VMCB instead */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
893
/* Debug-only linear-to-physical translation (gdbstub, monitor).
 * Walks the page tables like the fault handler above but performs no
 * permission checks and never modifies A/D bits.
 * Returns the physical address, or -1 if the address is unmapped. */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping through the A20 mask */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by bits 31..30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* legacy 32-bit 2-level paging */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
988
989 void hw_breakpoint_insert(CPUX86State *env, int index)
990 {
991 int type = 0, err = 0;
992
993 switch (hw_breakpoint_type(env->dr[7], index)) {
994 case DR7_TYPE_BP_INST:
995 if (hw_breakpoint_enabled(env->dr[7], index)) {
996 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
997 &env->cpu_breakpoint[index]);
998 }
999 break;
1000 case DR7_TYPE_DATA_WR:
1001 type = BP_CPU | BP_MEM_WRITE;
1002 break;
1003 case DR7_TYPE_IO_RW:
1004 /* No support for I/O watchpoints yet */
1005 break;
1006 case DR7_TYPE_DATA_RW:
1007 type = BP_CPU | BP_MEM_ACCESS;
1008 break;
1009 }
1010
1011 if (type != 0) {
1012 err = cpu_watchpoint_insert(env, env->dr[index],
1013 hw_breakpoint_len(env->dr[7], index),
1014 type, &env->cpu_watchpoint[index]);
1015 }
1016
1017 if (err) {
1018 env->cpu_breakpoint[index] = NULL;
1019 }
1020 }
1021
1022 void hw_breakpoint_remove(CPUX86State *env, int index)
1023 {
1024 if (!env->cpu_breakpoint[index])
1025 return;
1026 switch (hw_breakpoint_type(env->dr[7], index)) {
1027 case DR7_TYPE_BP_INST:
1028 if (hw_breakpoint_enabled(env->dr[7], index)) {
1029 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1030 }
1031 break;
1032 case DR7_TYPE_DATA_WR:
1033 case DR7_TYPE_DATA_RW:
1034 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1035 break;
1036 case DR7_TYPE_IO_RW:
1037 /* No support for I/O watchpoints yet */
1038 break;
1039 }
1040 }
1041
1042 bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
1043 {
1044 target_ulong dr6;
1045 int reg;
1046 bool hit_enabled = false;
1047
1048 dr6 = env->dr[6] & ~0xf;
1049 for (reg = 0; reg < DR7_MAX_BP; reg++) {
1050 bool bp_match = false;
1051 bool wp_match = false;
1052
1053 switch (hw_breakpoint_type(env->dr[7], reg)) {
1054 case DR7_TYPE_BP_INST:
1055 if (env->dr[reg] == env->eip) {
1056 bp_match = true;
1057 }
1058 break;
1059 case DR7_TYPE_DATA_WR:
1060 case DR7_TYPE_DATA_RW:
1061 if (env->cpu_watchpoint[reg] &&
1062 env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
1063 wp_match = true;
1064 }
1065 break;
1066 case DR7_TYPE_IO_RW:
1067 break;
1068 }
1069 if (bp_match || wp_match) {
1070 dr6 |= 1 << reg;
1071 if (hw_breakpoint_enabled(env->dr[7], reg)) {
1072 hit_enabled = true;
1073 }
1074 }
1075 }
1076
1077 if (hit_enabled || force_dr6_update) {
1078 env->dr[6] = dr6;
1079 }
1080
1081 return hit_enabled;
1082 }
1083
/* Debug-exception dispatcher called when a QEMU breakpoint/watchpoint
 * fires.  For guest (BP_CPU) watchpoints it re-evaluates the hardware
 * breakpoints: a confirmed hit raises #DB; a spurious one resumes
 * execution (cpu_resume_from_signal does not return).  For guest
 * instruction breakpoints it updates DR6 and raises #DB.  Hits that are
 * not BP_CPU (gdbstub breakpoints) are left for the debugger. */
void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* clear before re-checking so a re-trigger is not confused
               with this hit */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                /* spurious hit: restart the interrupted instruction */
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}
1108
/* Arguments for do_inject_x86_mce(), marshalled through run_on_cpu()
 * so the injection runs on the target VCPU's thread. */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor to print diagnostics to */
    X86CPU *cpu;         /* target CPU */
    int bank;            /* MCE bank number (4 MSRs per bank) */
    uint64_t status;     /* MCi_STATUS value to inject */
    uint64_t mcg_status; /* MCG_STATUS value to inject */
    uint64_t addr;       /* MCi_ADDR value */
    uint64_t misc;       /* MCi_MISC value */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1119
/* Perform the actual machine-check injection on the target VCPU
 * (run via run_on_cpu(); @data is a MCEInjectionParams*).  Depending on
 * the status bits this either raises an MCE interrupt, records a
 * corrected error in the bank, or sets the overflow bit. */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    /* each bank occupies 4 consecutive MSR slots: CTL/STATUS/ADDR/MISC */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        /* a second MCE while one is in flight is a triple fault */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: record in the bank without interrupting */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* bank already holds an uncorrected error: just note overflow */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1193
1194 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
1195 uint64_t status, uint64_t mcg_status, uint64_t addr,
1196 uint64_t misc, int flags)
1197 {
1198 CPUState *cs = CPU(cpu);
1199 CPUX86State *cenv = &cpu->env;
1200 MCEInjectionParams params = {
1201 .mon = mon,
1202 .cpu = cpu,
1203 .bank = bank,
1204 .status = status,
1205 .mcg_status = mcg_status,
1206 .addr = addr,
1207 .misc = misc,
1208 .flags = flags,
1209 };
1210 unsigned bank_num = cenv->mcg_cap & 0xff;
1211
1212 if (!cenv->mcg_cap) {
1213 monitor_printf(mon, "MCE injection not supported\n");
1214 return;
1215 }
1216 if (bank >= bank_num) {
1217 monitor_printf(mon, "Invalid MCE bank number\n");
1218 return;
1219 }
1220 if (!(status & MCI_STATUS_VAL)) {
1221 monitor_printf(mon, "Invalid MCE status code\n");
1222 return;
1223 }
1224 if ((flags & MCE_INJECT_BROADCAST)
1225 && !cpu_x86_support_mca_broadcast(cenv)) {
1226 monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
1227 return;
1228 }
1229
1230 run_on_cpu(cs, do_inject_x86_mce, &params);
1231 if (flags & MCE_INJECT_BROADCAST) {
1232 CPUState *other_cs;
1233
1234 params.bank = 1;
1235 params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
1236 params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
1237 params.addr = 0;
1238 params.misc = 0;
1239 CPU_FOREACH(other_cs) {
1240 if (other_cs == cs) {
1241 continue;
1242 }
1243 params.cpu = X86_CPU(other_cs);
1244 run_on_cpu(other_cs, do_inject_x86_mce, &params);
1245 }
1246 }
1247 }
1248
1249 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
1250 {
1251 X86CPU *cpu = x86_env_get_cpu(env);
1252
1253 if (kvm_enabled()) {
1254 env->tpr_access_type = access;
1255
1256 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR);
1257 } else {
1258 cpu_restore_state(env, env->mem_io_pc);
1259
1260 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
1261 }
1262 }
1263 #endif /* !CONFIG_USER_ONLY */
1264
1265 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1266 target_ulong *base, unsigned int *limit,
1267 unsigned int *flags)
1268 {
1269 X86CPU *cpu = x86_env_get_cpu(env);
1270 CPUState *cs = CPU(cpu);
1271 SegmentCache *dt;
1272 target_ulong ptr;
1273 uint32_t e1, e2;
1274 int index;
1275
1276 if (selector & 0x4)
1277 dt = &env->ldt;
1278 else
1279 dt = &env->gdt;
1280 index = selector & ~7;
1281 ptr = dt->base + index;
1282 if ((index + 7) > dt->limit
1283 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1284 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1285 return 0;
1286
1287 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1288 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1289 if (e2 & DESC_G_MASK)
1290 *limit = (*limit << 12) | 0xfff;
1291 *flags = e2;
1292
1293 return 1;
1294 }
1295
1296 #if !defined(CONFIG_USER_ONLY)
1297 void do_cpu_init(X86CPU *cpu)
1298 {
1299 CPUState *cs = CPU(cpu);
1300 CPUX86State *env = &cpu->env;
1301 int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
1302 uint64_t pat = env->pat;
1303
1304 cpu_reset(cs);
1305 cs->interrupt_request = sipi;
1306 env->pat = pat;
1307 apic_init_reset(cpu->apic_state);
1308 }
1309
/* Handle a SIPI by forwarding it to the vCPU's local APIC */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
1314 #else
/* No-op stub for user-mode emulation (CONFIG_USER_ONLY) */
void do_cpu_init(X86CPU *cpu)
{
}
/* No-op stub for user-mode emulation (CONFIG_USER_ONLY) */
void do_cpu_sipi(X86CPU *cpu)
{
}
1321 #endif