/* vmsvga: don't process more than 1024 fifo commands at once */
/* [qemu.git] / target-i386 / helper.c */
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "sysemu/kvm.h"
24 #include "kvm_i386.h"
25 #ifndef CONFIG_USER_ONLY
26 #include "sysemu/sysemu.h"
27 #include "monitor/monitor.h"
28 #include "hw/i386/apic_internal.h"
29 #endif
30
31 static void cpu_x86_version(CPUX86State *env, int *family, int *model)
32 {
33 int cpuver = env->cpuid_version;
34
35 if (family == NULL || model == NULL) {
36 return;
37 }
38
39 *family = (cpuver >> 8) & 0x0f;
40 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
41 }
42
43 /* Broadcast MCA signal for processor version 06H_EH and above */
44 int cpu_x86_support_mca_broadcast(CPUX86State *env)
45 {
46 int family = 0;
47 int model = 0;
48
49 cpu_x86_version(env, &family, &model);
50 if ((family == 6 && model >= 14) || family > 6) {
51 return 1;
52 }
53
54 return 0;
55 }
56
57 /***********************************************************/
58 /* x86 debug */
59
/* Printable names for the lazy condition-code states (CC_OP_*), indexed by
 * CC_OP value; used when dumping CPU state with CPU_DUMP_CCOP. The groups of
 * four entries cover the B/W/L/Q operand sizes of each operation. */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
125
/* Print one cached segment descriptor (@sc) named @name to @f: selector,
 * base, limit and flags, followed by a decoded form (DPL, type, access
 * bits) when the descriptor is present and the CPU is in protected mode. */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full 64-bit base */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* Skip the decoded form in real mode or for non-present descriptors */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code/data descriptor */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system descriptor: decode the type field by mode */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
182
183 #ifndef CONFIG_USER_ONLY
184
185 /* ARRAY_SIZE check is not required because
186 * DeliveryMode(dm) has a size of 3 bit.
187 */
188 static inline const char *dm2str(uint32_t dm)
189 {
190 static const char *str[] = {
191 "Fixed",
192 "...",
193 "SMI",
194 "...",
195 "NMI",
196 "INIT",
197 "...",
198 "ExtINT"
199 };
200 return str[dm];
201 }
202
/* Print one Local Vector Table register (@lvt) labeled @name to @f.
 * @is_timer enables the timer-mode column (one-shot / periodic /
 * tsc-deadline), which only applies to the LVT timer entry. */
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    /* the vector field is only printed for non-NMI delivery modes */
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}
225
226 /* ARRAY_SIZE check is not required because
227 * destination shorthand has a size of 2 bit.
228 */
229 static inline const char *shorthand2str(uint32_t shorthand)
230 {
231 const char *str[] = {
232 "no-shorthand", "self", "all-self", "all"
233 };
234 return str[shorthand];
235 }
236
/* Decode the APIC timer Divide Configuration Register value into the
 * actual divider: bit 3 and bits 1:0 form a 3-bit encoded field, where
 * the all-ones encoding means divide-by-1. */
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t encoded = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    if (encoded == 7) {
        return 1;            /* divide by 1 */
    }
    return 2 << encoded;     /* divide by 2, 4, 8, ..., 128 */
}
243
/* Render the low @size bits of @val into @str as a '0'/'1' string,
 * most-significant bit first, and NUL-terminate it. @str must have room
 * for size + 1 bytes. */
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    int bit;

    for (bit = size - 1; bit >= 0; bit--) {
        *str++ = ((val >> bit) & 1) ? '1' : '0';
    }
    *str = '\0';
}
251
252 #define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
253
/* Print the Interrupt Command Register pair (ICR/ICR2) of APIC @s to @f,
 * decoding the destination: shorthand, physical APIC/X2APIC ID, or, in
 * logical mode, the flat/cluster destination bitmap. */
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    /* with a shorthand, the destination field is not decoded */
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    /* in xAPIC mode only the top byte of ICR2 holds the destination */
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
305
306 static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
307 const char *name, uint32_t *ireg_tab,
308 uint32_t *tmr_tab)
309 {
310 int i, empty = true;
311
312 cpu_fprintf(f, "%s\t ", name);
313 for (i = 0; i < 256; i++) {
314 if (apic_get_bit(ireg_tab, i)) {
315 cpu_fprintf(f, "%u%s ", i,
316 apic_get_bit(tmr_tab, i) ? "(level)" : "");
317 empty = false;
318 }
319 }
320 cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
321 }
322
/* Dump the full local APIC state of @cs (LVT entries, timer, SPIV, ICR,
 * ESR, ISR/IRR and priority registers) to @f. Monitor helper; @flags is
 * currently unused. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    /* only the timer LVT gets the timer-mode decoding */
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    /* in cluster mode, additionally decode the logical destination */
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
366 #else
/* No local APIC exists in user-mode emulation; this stub keeps the
 * monitor interface linkable. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
371 #endif /* !CONFIG_USER_ONLY */
372
373 #define DUMP_CODE_BYTES_TOTAL 50
374 #define DUMP_CODE_BYTES_BACKWARD 20
375
/* Dump the architectural state of @cs to @f: general registers, EFLAGS,
 * segment/descriptor caches, control and debug registers, and optionally
 * (per CPU_DUMP_* bits in @flags) the lazy condition codes, FPU/SSE state
 * and a window of code bytes around EIP. */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* materialize the lazily-evaluated flags before printing */
    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print the full 64-bit registers including r8-r15 */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* segment registers, then the system descriptors */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: 64-bit bases and debug registers */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        /* show the lazy condition-code state by name when in range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* FTW: one bit per x87 register, set when the slot is in use */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers are architecturally visible only in 64-bit mode */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* dump code bytes around EIP, marking the current byte with <..> */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                /* unreadable guest memory */
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
569
570 /***********************************************************/
571 /* x86 mmu */
572 /* XXX: add PGE support */
573
574 void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
575 {
576 CPUX86State *env = &cpu->env;
577
578 a20_state = (a20_state != 0);
579 if (a20_state != ((env->a20_mask >> 20) & 1)) {
580 CPUState *cs = CPU(cpu);
581
582 qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
583 /* if the cpu is currently executing code, we must unlink it and
584 all the potentially executing TB */
585 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
586
587 /* when a20 is changed, all the MMU mappings are invalid, so
588 we must flush everything */
589 tlb_flush(cs, 1);
590 env->a20_mask = ~(1 << 20) | (a20_state << 20);
591 }
592 }
593
/* Install @new_cr0, handling the side effects of the mode-control bits:
 * TLB flush when PG/WP/PE change, long-mode entry/exit (LMA/CS64), and
 * recomputation of the PE/ADDSEG/MP/EM/TS hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    /* changing paging, write-protect or protection enable invalidates
       all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* CR0.ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
633
634 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
635 the PDPT */
636 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
637 {
638 X86CPU *cpu = x86_env_get_cpu(env);
639
640 env->cr[3] = new_cr3;
641 if (env->cr[0] & CR0_PG_MASK) {
642 qemu_log_mask(CPU_LOG_MMU,
643 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
644 tlb_flush(CPU(cpu), 0);
645 }
646 }
647
648 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
649 {
650 X86CPU *cpu = x86_env_get_cpu(env);
651 uint32_t hflags;
652
653 #if defined(DEBUG_MMU)
654 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
655 #endif
656 if ((new_cr4 ^ env->cr[4]) &
657 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
658 CR4_SMEP_MASK | CR4_SMAP_MASK)) {
659 tlb_flush(CPU(cpu), 1);
660 }
661
662 /* Clear bits we're going to recompute. */
663 hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
664
665 /* SSE handling */
666 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
667 new_cr4 &= ~CR4_OSFXSR_MASK;
668 }
669 if (new_cr4 & CR4_OSFXSR_MASK) {
670 hflags |= HF_OSFXSR_MASK;
671 }
672
673 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
674 new_cr4 &= ~CR4_SMAP_MASK;
675 }
676 if (new_cr4 & CR4_SMAP_MASK) {
677 hflags |= HF_SMAP_MASK;
678 }
679
680 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
681 new_cr4 &= ~CR4_PKE_MASK;
682 }
683
684 env->cr[4] = new_cr4;
685 env->hflags = hflags;
686
687 cpu_sync_bndcs_hflags(env);
688 }
689
690 #if defined(CONFIG_USER_ONLY)
691
692 int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
693 int is_write, int mmu_idx)
694 {
695 X86CPU *cpu = X86_CPU(cs);
696 CPUX86State *env = &cpu->env;
697
698 /* user mode only emulation */
699 is_write &= 1;
700 env->cr[2] = addr;
701 env->error_code = (is_write << PG_ERROR_W_BIT);
702 env->error_code |= PG_ERROR_U_MASK;
703 cs->exception_index = EXCP0E_PAGE;
704 return 1;
705 }
706
707 #else
708
709 /* return value:
710 * -1 = cannot handle fault
711 * 0 = nothing more to do
712 * 1 = generate PF fault
713 */
/* Walk the guest page tables for @addr and, on success, install the
 * translation in the TLB. is_write1 encodes the access type: 0 = read,
 * 1 = write, 2 = instruction fetch. */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* fold the fetch case (2) into "not a write" for A/D bit handling */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full access */
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    /* with NXE clear, the NX bit is reserved in the entries */
    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level walk: PML4E -> PDPE (-> PDE -> PTE below) */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            /* accumulate NX/USER/RW permissions across the levels;
             * NX is inverted so that '&' narrows it like the other bits */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries grant full permissions */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        /* legacy 32-bit 2-level walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page directory entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    /* for large pages, the low address bits of the entry are reserved */
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    /* undo the inversion: ptep now has NX set when execution is denied */
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    /* SMAP: supervisor accesses to user pages are denied in KSMAP mode */
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    /* SMEP: supervisor execution of user pages is denied */
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    /* protection keys: apply PKRU access/write-disable for user pages */
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            /* protection keys never block instruction fetches */
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* report instruction fetches when NX or SMEP makes them meaningful */
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
1006
/* Debug (gdbstub/monitor) translation of virtual @addr to a physical
 * address: walks the page tables read-only, without setting A/D bits,
 * checking permissions, or touching the TLB. Returns -1 when @addr is
 * not mapped (or non-canonical in long mode). */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page, with PSE-36 high address bits folded in */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    /* keep the within-large-page bits of the 4K-aligned virtual address */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
1111
/* Parameters for injecting a machine-check event into a vCPU; consumed by
 * do_inject_x86_mce(). */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor used for error reporting */
    X86CPU *cpu;         /* target CPU */
    int bank;            /* MCE bank index (4 MSRs per bank in mce_banks) */
    uint64_t status;     /* value to install in MCi_STATUS (banks[1]) */
    uint64_t mcg_status; /* value to install in MCG_STATUS */
    uint64_t addr;       /* value to install in MCi_ADDR (banks[2]) */
    uint64_t misc;       /* value to install in MCi_MISC (banks[3]) */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1122
/*
 * Inject a machine-check error into one CPU.
 *
 * Runs on the target CPU's thread via run_on_cpu(); @data points at an
 * MCEInjectionParams.  Mirrors the architectural MCA behaviour: the
 * injection may be silently dropped, recorded in the bank only
 * (corrected errors), raise a machine-check exception, or escalate to a
 * triple fault when a previous MCE is still being handled.
 */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    /* Each bank has 4 registers: CTL, STATUS, ADDR, MISC. */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        /* A second MCE while one is in flight, or CR4.MCE clear, cannot
         * be delivered as an exception: reset the machine instead. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* Bank already holds a valid error: flag the overflow. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        /* STATUS (with its VAL bit) is written last, after ADDR/MISC. */
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: log it in the bank without raising #MC. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* Bank holds a valid uncorrected error: only record overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1196
1197 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
1198 uint64_t status, uint64_t mcg_status, uint64_t addr,
1199 uint64_t misc, int flags)
1200 {
1201 CPUState *cs = CPU(cpu);
1202 CPUX86State *cenv = &cpu->env;
1203 MCEInjectionParams params = {
1204 .mon = mon,
1205 .cpu = cpu,
1206 .bank = bank,
1207 .status = status,
1208 .mcg_status = mcg_status,
1209 .addr = addr,
1210 .misc = misc,
1211 .flags = flags,
1212 };
1213 unsigned bank_num = cenv->mcg_cap & 0xff;
1214
1215 if (!cenv->mcg_cap) {
1216 monitor_printf(mon, "MCE injection not supported\n");
1217 return;
1218 }
1219 if (bank >= bank_num) {
1220 monitor_printf(mon, "Invalid MCE bank number\n");
1221 return;
1222 }
1223 if (!(status & MCI_STATUS_VAL)) {
1224 monitor_printf(mon, "Invalid MCE status code\n");
1225 return;
1226 }
1227 if ((flags & MCE_INJECT_BROADCAST)
1228 && !cpu_x86_support_mca_broadcast(cenv)) {
1229 monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
1230 return;
1231 }
1232
1233 run_on_cpu(cs, do_inject_x86_mce, &params);
1234 if (flags & MCE_INJECT_BROADCAST) {
1235 CPUState *other_cs;
1236
1237 params.bank = 1;
1238 params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
1239 params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
1240 params.addr = 0;
1241 params.misc = 0;
1242 CPU_FOREACH(other_cs) {
1243 if (other_cs == cs) {
1244 continue;
1245 }
1246 params.cpu = X86_CPU(other_cs);
1247 run_on_cpu(other_cs, do_inject_x86_mce, &params);
1248 }
1249 }
1250 }
1251
1252 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
1253 {
1254 X86CPU *cpu = x86_env_get_cpu(env);
1255 CPUState *cs = CPU(cpu);
1256
1257 if (kvm_enabled()) {
1258 env->tpr_access_type = access;
1259
1260 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
1261 } else {
1262 cpu_restore_state(cs, cs->mem_io_pc);
1263
1264 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
1265 }
1266 }
1267 #endif /* !CONFIG_USER_ONLY */
1268
1269 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1270 target_ulong *base, unsigned int *limit,
1271 unsigned int *flags)
1272 {
1273 X86CPU *cpu = x86_env_get_cpu(env);
1274 CPUState *cs = CPU(cpu);
1275 SegmentCache *dt;
1276 target_ulong ptr;
1277 uint32_t e1, e2;
1278 int index;
1279
1280 if (selector & 0x4)
1281 dt = &env->ldt;
1282 else
1283 dt = &env->gdt;
1284 index = selector & ~7;
1285 ptr = dt->base + index;
1286 if ((index + 7) > dt->limit
1287 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1288 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1289 return 0;
1290
1291 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1292 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1293 if (e2 & DESC_G_MASK)
1294 *limit = (*limit << 12) | 0xfff;
1295 *flags = e2;
1296
1297 return 1;
1298 }
1299
1300 #if !defined(CONFIG_USER_ONLY)
/*
 * Handle an INIT signal: reset the CPU, then restore the portion of
 * CPUX86State between start_init_save and end_init_save (the state that
 * survives INIT) and re-arm any SIPI that was already pending.
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    /* Snapshot the whole state; only the init-save window is restored. */
    *save = *env;

    cpu_reset(cs);
    /* cpu_reset() clobbers interrupt_request; keep only the pending SIPI. */
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}
1322
/* Handle a SIPI by forwarding it to the CPU's APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
1327 #else
/* User-mode emulation: INIT is not delivered, so this is a no-op stub. */
void do_cpu_init(X86CPU *cpu)
{
}
/* User-mode emulation: SIPI is not delivered, so this is a no-op stub. */
void do_cpu_sipi(X86CPU *cpu)
{
}
1334 #endif
1335
1336 /* Frob eflags into and out of the CPU temporary format. */
1337
1338 void x86_cpu_exec_enter(CPUState *cs)
1339 {
1340 X86CPU *cpu = X86_CPU(cs);
1341 CPUX86State *env = &cpu->env;
1342
1343 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1344 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
1345 CC_OP = CC_OP_EFLAGS;
1346 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1347 }
1348
1349 void x86_cpu_exec_exit(CPUState *cs)
1350 {
1351 X86CPU *cpu = X86_CPU(cs);
1352 CPUX86State *env = &cpu->env;
1353
1354 env->eflags = cpu_compute_eflags(env);
1355 }
1356
1357 #ifndef CONFIG_USER_ONLY
1358 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
1359 {
1360 X86CPU *cpu = X86_CPU(cs);
1361 CPUX86State *env = &cpu->env;
1362
1363 return address_space_ldub(cs->as, addr,
1364 cpu_get_mem_attrs(env),
1365 NULL);
1366 }
1367
1368 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
1369 {
1370 X86CPU *cpu = X86_CPU(cs);
1371 CPUX86State *env = &cpu->env;
1372
1373 return address_space_lduw(cs->as, addr,
1374 cpu_get_mem_attrs(env),
1375 NULL);
1376 }
1377
1378 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
1379 {
1380 X86CPU *cpu = X86_CPU(cs);
1381 CPUX86State *env = &cpu->env;
1382
1383 return address_space_ldl(cs->as, addr,
1384 cpu_get_mem_attrs(env),
1385 NULL);
1386 }
1387
1388 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
1389 {
1390 X86CPU *cpu = X86_CPU(cs);
1391 CPUX86State *env = &cpu->env;
1392
1393 return address_space_ldq(cs->as, addr,
1394 cpu_get_mem_attrs(env),
1395 NULL);
1396 }
1397
1398 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
1399 {
1400 X86CPU *cpu = X86_CPU(cs);
1401 CPUX86State *env = &cpu->env;
1402
1403 address_space_stb(cs->as, addr, val,
1404 cpu_get_mem_attrs(env),
1405 NULL);
1406 }
1407
1408 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
1409 {
1410 X86CPU *cpu = X86_CPU(cs);
1411 CPUX86State *env = &cpu->env;
1412
1413 address_space_stl_notdirty(cs->as, addr, val,
1414 cpu_get_mem_attrs(env),
1415 NULL);
1416 }
1417
1418 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
1419 {
1420 X86CPU *cpu = X86_CPU(cs);
1421 CPUX86State *env = &cpu->env;
1422
1423 address_space_stw(cs->as, addr, val,
1424 cpu_get_mem_attrs(env),
1425 NULL);
1426 }
1427
1428 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
1429 {
1430 X86CPU *cpu = X86_CPU(cs);
1431 CPUX86State *env = &cpu->env;
1432
1433 address_space_stl(cs->as, addr, val,
1434 cpu_get_mem_attrs(env),
1435 NULL);
1436 }
1437
1438 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
1439 {
1440 X86CPU *cpu = X86_CPU(cs);
1441 CPUX86State *env = &cpu->env;
1442
1443 address_space_stq(cs->as, addr, val,
1444 cpu_get_mem_attrs(env),
1445 NULL);
1446 }
1447 #endif