i386/kvm: Blacklist TSX on known broken hosts
qemu.git: target/i386/kvm.c
1 /*
2 * QEMU KVM support
3 *
4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
12 *
13 */
14
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
19
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
22
23 #include "qemu-common.h"
24 #include "cpu.h"
25 #include "sysemu/sysemu.h"
26 #include "sysemu/hw_accel.h"
27 #include "sysemu/kvm_int.h"
28 #include "kvm_i386.h"
29 #include "hyperv.h"
30
31 #include "exec/gdbstub.h"
32 #include "qemu/host-utils.h"
33 #include "qemu/config-file.h"
34 #include "qemu/error-report.h"
35 #include "hw/i386/pc.h"
36 #include "hw/i386/apic.h"
37 #include "hw/i386/apic_internal.h"
38 #include "hw/i386/apic-msidef.h"
39 #include "hw/i386/intel_iommu.h"
40 #include "hw/i386/x86-iommu.h"
41
42 #include "exec/ioport.h"
43 #include "standard-headers/asm-x86/hyperv.h"
44 #include "hw/pci/pci.h"
45 #include "hw/pci/msi.h"
46 #include "migration/migration.h"
47 #include "exec/memattrs.h"
48 #include "trace.h"
49
50 //#define DEBUG_KVM
51
52 #ifdef DEBUG_KVM
53 #define DPRINTF(fmt, ...) \
54 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
55 #else
56 #define DPRINTF(fmt, ...) \
57 do { } while (0)
58 #endif
59
60 #define MSR_KVM_WALL_CLOCK 0x11
61 #define MSR_KVM_SYSTEM_TIME 0x12
62
63 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
64 * 255 kvm_msr_entry structs */
65 #define MSR_BUF_SIZE 4096
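/* Checking that arithmetic: sizeof(struct kvm_msrs) is 8 bytes (nmsrs plus
* padding) and each struct kvm_msr_entry is 16 bytes, so
* (4096 - 8) / 16 = 255 entries fit in the buffer.
*/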
66
67 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
68 KVM_CAP_INFO(SET_TSS_ADDR),
69 KVM_CAP_INFO(EXT_CPUID),
70 KVM_CAP_INFO(MP_STATE),
71 KVM_CAP_LAST_INFO
72 };
73
74 static bool has_msr_star;
75 static bool has_msr_hsave_pa;
76 static bool has_msr_tsc_aux;
77 static bool has_msr_tsc_adjust;
78 static bool has_msr_tsc_deadline;
79 static bool has_msr_feature_control;
80 static bool has_msr_misc_enable;
81 static bool has_msr_smbase;
82 static bool has_msr_bndcfgs;
83 static int lm_capable_kernel;
84 static bool has_msr_hv_hypercall;
85 static bool has_msr_hv_crash;
86 static bool has_msr_hv_reset;
87 static bool has_msr_hv_vpindex;
88 static bool has_msr_hv_runtime;
89 static bool has_msr_hv_synic;
90 static bool has_msr_hv_stimer;
91 static bool has_msr_xss;
92
93 static bool has_msr_architectural_pmu;
94 static uint32_t num_architectural_pmu_counters;
95
96 static int has_xsave;
97 static int has_xcrs;
98 static int has_pit_state2;
99
100 static bool has_msr_mcg_ext_ctl;
101
102 static struct kvm_cpuid2 *cpuid_cache;
103
104 int kvm_has_pit_state2(void)
105 {
106 return has_pit_state2;
107 }
108
109 bool kvm_has_smm(void)
110 {
111 return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
112 }
113
114 bool kvm_has_adjust_clock_stable(void)
115 {
116 int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
117
118 return (ret == KVM_CLOCK_TSC_STABLE);
119 }
120
121 bool kvm_allows_irq0_override(void)
122 {
123 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
124 }
125
126 static bool kvm_x2apic_api_set_flags(uint64_t flags)
127 {
128 KVMState *s = KVM_STATE(current_machine->accelerator);
129
130 return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
131 }
132
133 #define MEMORIZE(fn, _result) \
134 ({ \
135 static bool _memorized; \
136 \
137 if (_memorized) { \
138 return _result; \
139 } \
140 _memorized = true; \
141 _result = fn; \
142 })
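/* Note on MEMORIZE: it expands inside the calling function, so the
* "return _result;" above returns from the *caller* on the second and
* later invocations. It is therefore only safe in functions whose return
* type matches _result, as in kvm_enable_x2apic() below.
*/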
143
144 static bool has_x2apic_api;
145
146 bool kvm_has_x2apic_api(void)
147 {
148 return has_x2apic_api;
149 }
150
151 bool kvm_enable_x2apic(void)
152 {
153 return MEMORIZE(
154 kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
155 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
156 has_x2apic_api);
157 }
158
159 static int kvm_get_tsc(CPUState *cs)
160 {
161 X86CPU *cpu = X86_CPU(cs);
162 CPUX86State *env = &cpu->env;
163 struct {
164 struct kvm_msrs info;
165 struct kvm_msr_entry entries[1];
166 } msr_data;
167 int ret;
168
169 if (env->tsc_valid) {
170 return 0;
171 }
172
173 msr_data.info.nmsrs = 1;
174 msr_data.entries[0].index = MSR_IA32_TSC;
175 env->tsc_valid = !runstate_is_running();
176
177 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
178 if (ret < 0) {
179 return ret;
180 }
181
182 assert(ret == 1);
183 env->tsc = msr_data.entries[0].data;
184 return 0;
185 }
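/* The header-plus-entries layout above is the general KVM_GET_MSRS calling
* convention; the ioctl returns the number of MSRs it processed. A minimal
* sketch of reading an arbitrary MSR the same way (illustrative only, not
* part of this file; assumes a valid CPUState *cs):
*
*     struct {
*         struct kvm_msrs info;
*         struct kvm_msr_entry entries[1];
*     } buf = { .info.nmsrs = 1 };
*     buf.entries[0].index = MSR_EFER;
*     if (kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &buf) == 1) {
*         uint64_t efer = buf.entries[0].data;
*     }
*/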
186
187 static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
188 {
189 kvm_get_tsc(cpu);
190 }
191
192 void kvm_synchronize_all_tsc(void)
193 {
194 CPUState *cpu;
195
196 if (kvm_enabled()) {
197 CPU_FOREACH(cpu) {
198 run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
199 }
200 }
201 }
202
203 static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
204 {
205 struct kvm_cpuid2 *cpuid;
206 int r, size;
207
208 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
209 cpuid = g_malloc0(size);
210 cpuid->nent = max;
211 r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
212 if (r == 0 && cpuid->nent >= max) {
213 r = -E2BIG;
214 }
215 if (r < 0) {
216 if (r == -E2BIG) {
217 g_free(cpuid);
218 return NULL;
219 } else {
220 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
221 strerror(-r));
222 exit(1);
223 }
224 }
225 return cpuid;
226 }
227
228 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
229 * for all entries.
230 */
231 static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
232 {
233 struct kvm_cpuid2 *cpuid;
234 int max = 1;
235
236 if (cpuid_cache != NULL) {
237 return cpuid_cache;
238 }
239 while ((cpuid = try_get_cpuid(s, max)) == NULL) {
240 max *= 2;
241 }
242 cpuid_cache = cpuid;
243 return cpuid;
244 }
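/* try_get_cpuid() treats "r == 0 && nent >= max" as -E2BIG because a
* completely full buffer is ambiguous: the list may have been silently
* truncated. The loop above therefore keeps doubling the buffer until the
* kernel returns fewer entries than were offered.
*/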
245
246 static const struct kvm_para_features {
247 int cap;
248 int feature;
249 } para_features[] = {
250 { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
251 { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
252 { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
253 { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
254 };
255
256 static int get_para_features(KVMState *s)
257 {
258 int i, features = 0;
259
260 for (i = 0; i < ARRAY_SIZE(para_features); i++) {
261 if (kvm_check_extension(s, para_features[i].cap)) {
262 features |= (1 << para_features[i].feature);
263 }
264 }
265
266 return features;
267 }
268
269 static bool host_tsx_blacklisted(void)
270 {
271 int family, model, stepping;
272 char vendor[CPUID_VENDOR_SZ + 1];
273
274 host_vendor_fms(vendor, &family, &model, &stepping);
275
276 /* Check if we are running on a Haswell host known to have broken TSX */
277 return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
278 (family == 6) &&
279 ((model == 63 && stepping < 4) ||
280 model == 60 || model == 69 || model == 70);
281 }
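/* Decoded, the blacklist above covers the Haswell parts whose TSX
* implementation was found broken and was later disabled by an Intel
* microcode update: model 63 (0x3F, Haswell-E/EP) below stepping 4,
* model 60 (0x3C, desktop Haswell), model 69 (0x45, Haswell-ULT) and
* model 70 (0x46, Crystal Well). On such hosts HLE and RTM are masked
* out of GET_SUPPORTED_CPUID below.
*/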
282
283 /* Returns the value for a specific register on the cpuid entry
284 */
285 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
286 {
287 uint32_t ret = 0;
288 switch (reg) {
289 case R_EAX:
290 ret = entry->eax;
291 break;
292 case R_EBX:
293 ret = entry->ebx;
294 break;
295 case R_ECX:
296 ret = entry->ecx;
297 break;
298 case R_EDX:
299 ret = entry->edx;
300 break;
301 }
302 return ret;
303 }
304
305 /* Find matching entry for function/index on kvm_cpuid2 struct
306 */
307 static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
308 uint32_t function,
309 uint32_t index)
310 {
311 int i;
312 for (i = 0; i < cpuid->nent; ++i) {
313 if (cpuid->entries[i].function == function &&
314 cpuid->entries[i].index == index) {
315 return &cpuid->entries[i];
316 }
317 }
318 /* not found: */
319 return NULL;
320 }
321
322 uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
323 uint32_t index, int reg)
324 {
325 struct kvm_cpuid2 *cpuid;
326 uint32_t ret = 0;
327 uint32_t cpuid_1_edx;
328 bool found = false;
329
330 cpuid = get_supported_cpuid(s);
331
332 struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
333 if (entry) {
334 found = true;
335 ret = cpuid_entry_get_reg(entry, reg);
336 }
337
338 /* Fixups for the data returned by KVM, below */
339
340 if (function == 1 && reg == R_EDX) {
341 /* KVM before 2.6.30 misreports the following features */
342 ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
343 } else if (function == 1 && reg == R_ECX) {
344 /* We can set the hypervisor flag, even if KVM does not return it on
345 * GET_SUPPORTED_CPUID
346 */
347 ret |= CPUID_EXT_HYPERVISOR;
348 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
349 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
350 * and the irqchip is in the kernel.
351 */
352 if (kvm_irqchip_in_kernel() &&
353 kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
354 ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
355 }
356
357 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
358 * without the in-kernel irqchip
359 */
360 if (!kvm_irqchip_in_kernel()) {
361 ret &= ~CPUID_EXT_X2APIC;
362 }
363 } else if (function == 6 && reg == R_EAX) {
364 ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
365 } else if (function == 7 && index == 0 && reg == R_EBX) {
366 if (host_tsx_blacklisted()) {
367 ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
368 }
369 } else if (function == 0x80000001 && reg == R_EDX) {
370 /* On Intel, kvm returns cpuid according to the Intel spec,
371 * so add missing bits according to the AMD spec:
372 */
373 cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
374 ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
375 } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
376 /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
377 * be enabled without the in-kernel irqchip
378 */
379 if (!kvm_irqchip_in_kernel()) {
380 ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
381 }
382 }
383
384 /* fallback for older kernels */
385 if ((function == KVM_CPUID_FEATURES) && !found) {
386 ret = get_para_features(s);
387 }
388
389 return ret;
390 }
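/* Typical use, e.g. to see whether TSX survived the blacklist above
* (illustrative sketch, assumes a valid KVMState *s):
*
*     uint32_t ebx = kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX);
*     bool have_rtm = ebx & CPUID_7_0_EBX_RTM;
*/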
391
392 typedef struct HWPoisonPage {
393 ram_addr_t ram_addr;
394 QLIST_ENTRY(HWPoisonPage) list;
395 } HWPoisonPage;
396
397 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
398 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
399
400 static void kvm_unpoison_all(void *param)
401 {
402 HWPoisonPage *page, *next_page;
403
404 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
405 QLIST_REMOVE(page, list);
406 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
407 g_free(page);
408 }
409 }
410
411 static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
412 {
413 HWPoisonPage *page;
414
415 QLIST_FOREACH(page, &hwpoison_page_list, list) {
416 if (page->ram_addr == ram_addr) {
417 return;
418 }
419 }
420 page = g_new(HWPoisonPage, 1);
421 page->ram_addr = ram_addr;
422 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
423 }
424
425 static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
426 int *max_banks)
427 {
428 int r;
429
430 r = kvm_check_extension(s, KVM_CAP_MCE);
431 if (r > 0) {
432 *max_banks = r;
433 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
434 }
435 return -ENOSYS;
436 }
437
438 static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
439 {
440 CPUState *cs = CPU(cpu);
441 CPUX86State *env = &cpu->env;
442 uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
443 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
444 uint64_t mcg_status = MCG_STATUS_MCIP;
445 int flags = 0;
446
447 if (code == BUS_MCEERR_AR) {
448 status |= MCI_STATUS_AR | 0x134;
449 mcg_status |= MCG_STATUS_EIPV;
450 } else {
451 status |= 0xc0;
452 mcg_status |= MCG_STATUS_RIPV;
453 }
454
455 flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
456 /* We need to read the value of MSR_EXT_MCG_CTL that was set by the
457 * guest kernel back into env->mcg_ext_ctl.
458 */
459 cpu_synchronize_state(cs);
460 if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
461 mcg_status |= MCG_STATUS_LMCE;
462 flags = 0;
463 }
464
465 cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
466 (MCM_ADDR_PHYS << 6) | 0xc, flags);
467 }
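/* The magic numbers above are MCA error codes (MCACOD) from the SDM's UCR
* tables: 0x134 is the SRAR "data load" code used for action-required
* faults, 0xc0 the SRAO "memory scrubbing" code used for action-optional
* ones.
*/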
468
469 static void hardware_memory_error(void)
470 {
471 fprintf(stderr, "Hardware memory error!\n");
472 exit(1);
473 }
474
475 void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
476 {
477 X86CPU *cpu = X86_CPU(c);
478 CPUX86State *env = &cpu->env;
479 ram_addr_t ram_addr;
480 hwaddr paddr;
481
482 /* If we get an action required MCE, it has been injected by KVM
483 * while the VM was running. An action optional MCE instead should
484 * be coming from the main thread, which qemu_init_sigbus identifies
485 * as the "early kill" thread.
486 */
487 assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
488
489 if ((env->mcg_cap & MCG_SER_P) && addr) {
490 ram_addr = qemu_ram_addr_from_host(addr);
491 if (ram_addr != RAM_ADDR_INVALID &&
492 kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
493 kvm_hwpoison_page_add(ram_addr);
494 kvm_mce_inject(cpu, paddr, code);
495 return;
496 }
497
498 fprintf(stderr, "Hardware memory error for memory used by "
499 "QEMU itself instead of guest system!\n");
500 }
501
502 if (code == BUS_MCEERR_AR) {
503 hardware_memory_error();
504 }
505
506 /* Hope we are lucky for AO MCE */
507 }
508
509 static int kvm_inject_mce_oldstyle(X86CPU *cpu)
510 {
511 CPUX86State *env = &cpu->env;
512
513 if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
514 unsigned int bank, bank_num = env->mcg_cap & 0xff;
515 struct kvm_x86_mce mce;
516
517 env->exception_injected = -1;
518
519 /*
520 * There must be at least one bank in use if an MCE is pending.
521 * Find it and use its values for the event injection.
522 */
523 for (bank = 0; bank < bank_num; bank++) {
524 if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
525 break;
526 }
527 }
528 assert(bank < bank_num);
529
530 mce.bank = bank;
531 mce.status = env->mce_banks[bank * 4 + 1];
532 mce.mcg_status = env->mcg_status;
533 mce.addr = env->mce_banks[bank * 4 + 2];
534 mce.misc = env->mce_banks[bank * 4 + 3];
535
536 return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
537 }
538 return 0;
539 }
540
541 static void cpu_update_state(void *opaque, int running, RunState state)
542 {
543 CPUX86State *env = opaque;
544
545 if (running) {
546 env->tsc_valid = false;
547 }
548 }
549
550 unsigned long kvm_arch_vcpu_id(CPUState *cs)
551 {
552 X86CPU *cpu = X86_CPU(cs);
553 return cpu->apic_id;
554 }
555
556 #ifndef KVM_CPUID_SIGNATURE_NEXT
557 #define KVM_CPUID_SIGNATURE_NEXT 0x40000100
558 #endif
559
560 static bool hyperv_hypercall_available(X86CPU *cpu)
561 {
562 return cpu->hyperv_vapic ||
563 (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
564 }
565
566 static bool hyperv_enabled(X86CPU *cpu)
567 {
568 CPUState *cs = CPU(cpu);
569 return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
570 (hyperv_hypercall_available(cpu) ||
571 cpu->hyperv_time ||
572 cpu->hyperv_relaxed_timing ||
573 cpu->hyperv_crash ||
574 cpu->hyperv_reset ||
575 cpu->hyperv_vpindex ||
576 cpu->hyperv_runtime ||
577 cpu->hyperv_synic ||
578 cpu->hyperv_stimer);
579 }
580
581 static int kvm_arch_set_tsc_khz(CPUState *cs)
582 {
583 X86CPU *cpu = X86_CPU(cs);
584 CPUX86State *env = &cpu->env;
585 int r;
586
587 if (!env->tsc_khz) {
588 return 0;
589 }
590
591 r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
592 kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
593 -ENOTSUP;
594 if (r < 0) {
595 /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
596 * TSC frequency doesn't match the one we want.
597 */
598 int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
599 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
600 -ENOTSUP;
601 if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
602 error_report("warning: TSC frequency mismatch between "
603 "VM (%" PRId64 " kHz) and host (%d kHz), "
604 "and TSC scaling unavailable",
605 env->tsc_khz, cur_freq);
606 return r;
607 }
608 }
609
610 return 0;
611 }
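/* A note on the flow above: KVM_CAP_TSC_CONTROL is only advertised when
* the host can scale the guest TSC in hardware, so when it is absent the
* only acceptable outcome is that the host TSC already runs at the
* requested env->tsc_khz (typically set via the tsc-frequency CPU
* property, which takes Hz, or restored by migration).
*/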
612
613 static int hyperv_handle_properties(CPUState *cs)
614 {
615 X86CPU *cpu = X86_CPU(cs);
616 CPUX86State *env = &cpu->env;
617
618 if (cpu->hyperv_time &&
619 kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
620 cpu->hyperv_time = false;
621 }
622
623 if (cpu->hyperv_relaxed_timing) {
624 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
625 }
626 if (cpu->hyperv_vapic) {
627 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
628 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
629 }
630 if (cpu->hyperv_time) {
631 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
632 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
633 env->features[FEAT_HYPERV_EAX] |= 0x200; /* bit 9: reference TSC page accessible */
634 }
635 if (cpu->hyperv_crash && has_msr_hv_crash) {
636 env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
637 }
638 env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
639 if (cpu->hyperv_reset && has_msr_hv_reset) {
640 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
641 }
642 if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
643 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
644 }
645 if (cpu->hyperv_runtime && has_msr_hv_runtime) {
646 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
647 }
648 if (cpu->hyperv_synic) {
649 int sint;
650
651 if (!has_msr_hv_synic ||
652 kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
653 fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
654 return -ENOSYS;
655 }
656
657 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
658 env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
659 for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
660 env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
661 }
662 }
663 if (cpu->hyperv_stimer) {
664 if (!has_msr_hv_stimer) {
665 fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
666 return -ENOSYS;
667 }
668 env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
669 }
670 return 0;
671 }
672
673 static Error *invtsc_mig_blocker;
674
675 #define KVM_MAX_CPUID_ENTRIES 100
676
677 int kvm_arch_init_vcpu(CPUState *cs)
678 {
679 struct {
680 struct kvm_cpuid2 cpuid;
681 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
682 } QEMU_PACKED cpuid_data;
683 X86CPU *cpu = X86_CPU(cs);
684 CPUX86State *env = &cpu->env;
685 uint32_t limit, i, j, cpuid_i;
686 uint32_t unused;
687 struct kvm_cpuid_entry2 *c;
688 uint32_t signature[3];
689 int kvm_base = KVM_CPUID_SIGNATURE;
690 int r;
691 Error *local_err = NULL;
692
693 memset(&cpuid_data, 0, sizeof(cpuid_data));
694
695 cpuid_i = 0;
696
697 /* Paravirtualization CPUIDs */
698 if (hyperv_enabled(cpu)) {
699 c = &cpuid_data.entries[cpuid_i++];
700 c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
701 if (!cpu->hyperv_vendor_id) {
702 memcpy(signature, "Microsoft Hv", 12);
703 } else {
704 size_t len = strlen(cpu->hyperv_vendor_id);
705
706 if (len > 12) {
707 error_report("hv-vendor-id truncated to 12 characters");
708 len = 12;
709 }
710 memset(signature, 0, 12);
711 memcpy(signature, cpu->hyperv_vendor_id, len);
712 }
713 c->eax = HYPERV_CPUID_MIN;
714 c->ebx = signature[0];
715 c->ecx = signature[1];
716 c->edx = signature[2];
717
718 c = &cpuid_data.entries[cpuid_i++];
719 c->function = HYPERV_CPUID_INTERFACE;
720 memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
721 c->eax = signature[0];
722 c->ebx = 0;
723 c->ecx = 0;
724 c->edx = 0;
725
726 c = &cpuid_data.entries[cpuid_i++];
727 c->function = HYPERV_CPUID_VERSION;
728 c->eax = 0x00001bbc;
729 c->ebx = 0x00060001;
730
731 c = &cpuid_data.entries[cpuid_i++];
732 c->function = HYPERV_CPUID_FEATURES;
733 r = hyperv_handle_properties(cs);
734 if (r) {
735 return r;
736 }
737 c->eax = env->features[FEAT_HYPERV_EAX];
738 c->ebx = env->features[FEAT_HYPERV_EBX];
739 c->edx = env->features[FEAT_HYPERV_EDX];
740
741 c = &cpuid_data.entries[cpuid_i++];
742 c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
743 if (cpu->hyperv_relaxed_timing) {
744 c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
745 }
746 if (cpu->hyperv_vapic) {
747 c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
748 }
749 c->ebx = cpu->hyperv_spinlock_attempts;
750
751 c = &cpuid_data.entries[cpuid_i++];
752 c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
753 c->eax = 0x40;
754 c->ebx = 0x40;
755
756 kvm_base = KVM_CPUID_SIGNATURE_NEXT;
757 has_msr_hv_hypercall = true;
758 }
759
760 if (cpu->expose_kvm) {
761 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
762 c = &cpuid_data.entries[cpuid_i++];
763 c->function = KVM_CPUID_SIGNATURE | kvm_base;
764 c->eax = KVM_CPUID_FEATURES | kvm_base;
765 c->ebx = signature[0];
766 c->ecx = signature[1];
767 c->edx = signature[2];
768
769 c = &cpuid_data.entries[cpuid_i++];
770 c->function = KVM_CPUID_FEATURES | kvm_base;
771 c->eax = env->features[FEAT_KVM];
772 }
773
774 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
775
776 for (i = 0; i <= limit; i++) {
777 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
778 fprintf(stderr, "unsupported level value: 0x%x\n", limit);
779 abort();
780 }
781 c = &cpuid_data.entries[cpuid_i++];
782
783 switch (i) {
784 case 2: {
785 /* Keep reading function 2 till all the input is received */
786 int times;
787
788 c->function = i;
789 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
790 KVM_CPUID_FLAG_STATE_READ_NEXT;
791 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
792 times = c->eax & 0xff;
793
794 for (j = 1; j < times; ++j) {
795 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
796 fprintf(stderr, "cpuid_data is full, no space for "
797 "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
798 abort();
799 }
800 c = &cpuid_data.entries[cpuid_i++];
801 c->function = i;
802 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
803 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
804 }
805 break;
806 }
807 case 4:
808 case 0xb:
809 case 0xd:
810 for (j = 0; ; j++) {
811 if (i == 0xd && j == 64) {
812 break;
813 }
814 c->function = i;
815 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
816 c->index = j;
817 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
818
819 if (i == 4 && c->eax == 0) {
820 break;
821 }
822 if (i == 0xb && !(c->ecx & 0xff00)) {
823 break;
824 }
825 if (i == 0xd && c->eax == 0) {
826 continue;
827 }
828 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
829 fprintf(stderr, "cpuid_data is full, no space for "
830 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
831 abort();
832 }
833 c = &cpuid_data.entries[cpuid_i++];
834 }
835 break;
836 default:
837 c->function = i;
838 c->flags = 0;
839 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
840 break;
841 }
842 }
843
844 if (limit >= 0x0a) {
845 uint32_t ver;
846
847 cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
848 if ((ver & 0xff) > 0) {
849 has_msr_architectural_pmu = true;
850 num_architectural_pmu_counters = (ver & 0xff00) >> 8;
851
852 /* Shouldn't be more than 32, since that's the number of bits
853 * available in EBX to tell us _which_ counters are available.
854 * Play it safe.
855 */
856 if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
857 num_architectural_pmu_counters = MAX_GP_COUNTERS;
858 }
859 }
860 }
861
862 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
863
864 for (i = 0x80000000; i <= limit; i++) {
865 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
866 fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
867 abort();
868 }
869 c = &cpuid_data.entries[cpuid_i++];
870
871 c->function = i;
872 c->flags = 0;
873 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
874 }
875
876 /* Call Centaur's CPUID instructions if they are supported. */
877 if (env->cpuid_xlevel2 > 0) {
878 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
879
880 for (i = 0xC0000000; i <= limit; i++) {
881 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
882 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
883 abort();
884 }
885 c = &cpuid_data.entries[cpuid_i++];
886
887 c->function = i;
888 c->flags = 0;
889 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
890 }
891 }
892
893 cpuid_data.cpuid.nent = cpuid_i;
894
895 if (((env->cpuid_version >> 8) & 0xF) >= 6
896 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
897 (CPUID_MCE | CPUID_MCA)
898 && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
899 uint64_t mcg_cap, unsupported_caps;
900 int banks;
901 int ret;
902
903 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
904 if (ret < 0) {
905 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
906 return ret;
907 }
908
909 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
910 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
911 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
912 return -ENOTSUP;
913 }
914
915 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
916 if (unsupported_caps) {
917 if (unsupported_caps & MCG_LMCE_P) {
918 error_report("kvm: LMCE not supported");
919 return -ENOTSUP;
920 }
921 error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
922 unsupported_caps);
923 }
924
925 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
926 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
927 if (ret < 0) {
928 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
929 return ret;
930 }
931 }
932
933 qemu_add_vm_change_state_handler(cpu_update_state, env);
934
935 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
936 if (c) {
937 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
938 !!(c->ecx & CPUID_EXT_SMX);
939 }
940
941 if (env->mcg_cap & MCG_LMCE_P) {
942 has_msr_mcg_ext_ctl = has_msr_feature_control = true;
943 }
944
945 if (!env->user_tsc_khz) {
946 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
947 invtsc_mig_blocker == NULL) {
948 /* for migration */
949 error_setg(&invtsc_mig_blocker,
950 "State blocked by non-migratable CPU device"
951 " (invtsc flag)");
952 r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
953 if (local_err) {
954 error_report_err(local_err);
955 error_free(invtsc_mig_blocker);
956 goto fail;
957 }
958 /* for savevm */
959 vmstate_x86_cpu.unmigratable = 1;
960 }
961 }
962
963 r = kvm_arch_set_tsc_khz(cs);
964 if (r < 0) {
965 goto fail;
966 }
967
968 /* The vCPU's TSC frequency is either specified by the user or, if the
969 * former is not present, follows the value used by KVM. In the latter
970 * case, we query it from KVM and record it in env->tsc_khz, so that
971 * the vCPU's TSC frequency can be migrated later via this field.
972 */
973 if (!env->tsc_khz) {
974 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
975 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
976 -ENOTSUP;
977 if (r > 0) {
978 env->tsc_khz = r;
979 }
980 }
981
982 if (cpu->vmware_cpuid_freq
983 /* Guests depend on 0x40000000 to detect this feature, so only expose
984 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
985 && cpu->expose_kvm
986 && kvm_base == KVM_CPUID_SIGNATURE
987 /* TSC clock must be stable and known for this feature. */
988 && ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
989 || env->user_tsc_khz != 0)
990 && env->tsc_khz != 0) {
991
992 c = &cpuid_data.entries[cpuid_i++];
993 c->function = KVM_CPUID_SIGNATURE | 0x10;
994 c->eax = env->tsc_khz;
995 /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
996 * APIC_BUS_CYCLE_NS */
997 c->ebx = 1000000;
998 c->ecx = c->edx = 0;
999
1000 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
1001 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
1002 }
1003
1004 cpuid_data.cpuid.nent = cpuid_i;
1005
1006 cpuid_data.cpuid.padding = 0;
1007 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
1008 if (r) {
1009 goto fail;
1010 }
1011
1012 if (has_xsave) {
1013 env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
1014 }
1015 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
1016
1017 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
1018 has_msr_tsc_aux = false;
1019 }
1020
1021 return 0;
1022
1023 fail:
1024 migrate_del_blocker(invtsc_mig_blocker);
1025 return r;
1026 }
1027
1028 void kvm_arch_reset_vcpu(X86CPU *cpu)
1029 {
1030 CPUX86State *env = &cpu->env;
1031
1032 env->exception_injected = -1;
1033 env->interrupt_injected = -1;
1034 env->xcr0 = 1;
1035 if (kvm_irqchip_in_kernel()) {
1036 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
1037 KVM_MP_STATE_UNINITIALIZED;
1038 } else {
1039 env->mp_state = KVM_MP_STATE_RUNNABLE;
1040 }
1041 }
1042
1043 void kvm_arch_do_init_vcpu(X86CPU *cpu)
1044 {
1045 CPUX86State *env = &cpu->env;
1046
1047 /* APs get directly into wait-for-SIPI state. */
1048 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
1049 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
1050 }
1051 }
1052
1053 static int kvm_get_supported_msrs(KVMState *s)
1054 {
1055 static int kvm_supported_msrs;
1056 int ret = 0;
1057
1058 /* first time */
1059 if (kvm_supported_msrs == 0) {
1060 struct kvm_msr_list msr_list, *kvm_msr_list;
1061
1062 kvm_supported_msrs = -1;
1063
1064 /* Obtain MSR list from KVM. These are the MSRs that we must
1065 * save/restore */
1066 msr_list.nmsrs = 0;
1067 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
1068 if (ret < 0 && ret != -E2BIG) {
1069 return ret;
1070 }
1071 /* Old kernel modules had a bug and could write beyond the provided
1072 memory. Allocate at least 1K to be safe. */
1073 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
1074 msr_list.nmsrs *
1075 sizeof(msr_list.indices[0])));
1076
1077 kvm_msr_list->nmsrs = msr_list.nmsrs;
1078 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
1079 if (ret >= 0) {
1080 int i;
1081
1082 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
1083 if (kvm_msr_list->indices[i] == MSR_STAR) {
1084 has_msr_star = true;
1085 continue;
1086 }
1087 if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
1088 has_msr_hsave_pa = true;
1089 continue;
1090 }
1091 if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
1092 has_msr_tsc_aux = true;
1093 continue;
1094 }
1095 if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
1096 has_msr_tsc_adjust = true;
1097 continue;
1098 }
1099 if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
1100 has_msr_tsc_deadline = true;
1101 continue;
1102 }
1103 if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
1104 has_msr_smbase = true;
1105 continue;
1106 }
1107 if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
1108 has_msr_misc_enable = true;
1109 continue;
1110 }
1111 if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
1112 has_msr_bndcfgs = true;
1113 continue;
1114 }
1115 if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
1116 has_msr_xss = true;
1117 continue;
1118 }
1119 if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
1120 has_msr_hv_crash = true;
1121 continue;
1122 }
1123 if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
1124 has_msr_hv_reset = true;
1125 continue;
1126 }
1127 if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
1128 has_msr_hv_vpindex = true;
1129 continue;
1130 }
1131 if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
1132 has_msr_hv_runtime = true;
1133 continue;
1134 }
1135 if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
1136 has_msr_hv_synic = true;
1137 continue;
1138 }
1139 if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
1140 has_msr_hv_stimer = true;
1141 continue;
1142 }
1143 }
1144 }
1145
1146 g_free(kvm_msr_list);
1147 }
1148
1149 return ret;
1150 }
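/* The if/continue chain above is equivalent to a table walk; a sketch of
* the table-driven form (not the code used here, same globals assumed):
*
*     static const struct { uint32_t index; bool *flag; } probes[] = {
*         { MSR_STAR, &has_msr_star },
*         { MSR_VM_HSAVE_PA, &has_msr_hsave_pa },
*         { MSR_IA32_BNDCFGS, &has_msr_bndcfgs },
*     };
*     int i;
*     size_t j;
*     for (i = 0; i < kvm_msr_list->nmsrs; i++) {
*         for (j = 0; j < ARRAY_SIZE(probes); j++) {
*             if (kvm_msr_list->indices[i] == probes[j].index) {
*                 *probes[j].flag = true;
*             }
*         }
*     }
*/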
1151
1152 static Notifier smram_machine_done;
1153 static KVMMemoryListener smram_listener;
1154 static AddressSpace smram_address_space;
1155 static MemoryRegion smram_as_root;
1156 static MemoryRegion smram_as_mem;
1157
1158 static void register_smram_listener(Notifier *n, void *unused)
1159 {
1160 MemoryRegion *smram =
1161 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
1162
1163 /* Outer container... */
1164 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
1165 memory_region_set_enabled(&smram_as_root, true);
1166
1167 /* ... with two regions inside: normal system memory with low
1168 * priority, and...
1169 */
1170 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
1171 get_system_memory(), 0, ~0ull);
1172 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
1173 memory_region_set_enabled(&smram_as_mem, true);
1174
1175 if (smram) {
1176 /* ... SMRAM with higher priority */
1177 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
1178 memory_region_set_enabled(smram, true);
1179 }
1180
1181 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
1182 kvm_memory_listener_register(kvm_state, &smram_listener,
1183 &smram_address_space, 1);
1184 }
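/* The trailing "1" is the KVM address space id: id 0 is the regular guest
* physical address space, id 1 is the SMM view that the kernel switches to
* while the VCPU is in system management mode.
*/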
1185
1186 int kvm_arch_init(MachineState *ms, KVMState *s)
1187 {
1188 uint64_t identity_base = 0xfffbc000;
1189 uint64_t shadow_mem;
1190 int ret;
1191 struct utsname utsname;
1192
1193 #ifdef KVM_CAP_XSAVE
1194 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
1195 #endif
1196
1197 #ifdef KVM_CAP_XCRS
1198 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
1199 #endif
1200
1201 #ifdef KVM_CAP_PIT_STATE2
1202 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
1203 #endif
1204
1205 ret = kvm_get_supported_msrs(s);
1206 if (ret < 0) {
1207 return ret;
1208 }
1209
1210 uname(&utsname);
1211 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
1212
1213 /*
1214 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
1215 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
1216 * Since these must be part of guest physical memory, we need to allocate
1217 * them, both by setting their start addresses in the kernel and by
1218 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
1219 *
1220 * Older KVM versions may not support setting the identity map base. In
1221 * that case we need to stick with the default, i.e. a 256K maximum BIOS
1222 * size.
1223 */
1224 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
1225 /* Allows up to 16M BIOSes. */
1226 identity_base = 0xfeffc000;
1227
1228 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
1229 if (ret < 0) {
1230 return ret;
1231 }
1232 }
1233
1234 /* Set TSS base one page after EPT identity map. */
1235 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
1236 if (ret < 0) {
1237 return ret;
1238 }
1239
1240 /* Tell fw_cfg to notify the BIOS to reserve the range. */
1241 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
1242 if (ret < 0) {
1243 fprintf(stderr, "e820_add_entry() table is full\n");
1244 return ret;
1245 }
1246 qemu_register_reset(kvm_unpoison_all, NULL);
1247
1248 shadow_mem = machine_kvm_shadow_mem(ms);
1249 if (shadow_mem != -1) {
1250 shadow_mem /= 4096;
1251 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
1252 if (ret < 0) {
1253 return ret;
1254 }
1255 }
1256
1257 if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
1258 smram_machine_done.notify = register_smram_listener;
1259 qemu_add_machine_init_done_notifier(&smram_machine_done);
1260 }
1261 return 0;
1262 }
1263
1264 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1265 {
1266 lhs->selector = rhs->selector;
1267 lhs->base = rhs->base;
1268 lhs->limit = rhs->limit;
1269 lhs->type = 3;
1270 lhs->present = 1;
1271 lhs->dpl = 3;
1272 lhs->db = 0;
1273 lhs->s = 1;
1274 lhs->l = 0;
1275 lhs->g = 0;
1276 lhs->avl = 0;
1277 lhs->unusable = 0;
1278 }
1279
1280 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1281 {
1282 unsigned flags = rhs->flags;
1283 lhs->selector = rhs->selector;
1284 lhs->base = rhs->base;
1285 lhs->limit = rhs->limit;
1286 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
1287 lhs->present = (flags & DESC_P_MASK) != 0;
1288 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
1289 lhs->db = (flags >> DESC_B_SHIFT) & 1;
1290 lhs->s = (flags & DESC_S_MASK) != 0;
1291 lhs->l = (flags >> DESC_L_SHIFT) & 1;
1292 lhs->g = (flags & DESC_G_MASK) != 0;
1293 lhs->avl = (flags & DESC_AVL_MASK) != 0;
1294 lhs->unusable = !lhs->present;
1295 lhs->padding = 0;
1296 }
1297
1298 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
1299 {
1300 lhs->selector = rhs->selector;
1301 lhs->base = rhs->base;
1302 lhs->limit = rhs->limit;
1303 if (rhs->unusable) {
1304 lhs->flags = 0;
1305 } else {
1306 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
1307 (rhs->present * DESC_P_MASK) |
1308 (rhs->dpl << DESC_DPL_SHIFT) |
1309 (rhs->db << DESC_B_SHIFT) |
1310 (rhs->s * DESC_S_MASK) |
1311 (rhs->l << DESC_L_SHIFT) |
1312 (rhs->g * DESC_G_MASK) |
1313 (rhs->avl * DESC_AVL_MASK);
1314 }
1315 }
1316
1317 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
1318 {
1319 if (set) {
1320 *kvm_reg = *qemu_reg;
1321 } else {
1322 *qemu_reg = *kvm_reg;
1323 }
1324 }
1325
1326 static int kvm_getput_regs(X86CPU *cpu, int set)
1327 {
1328 CPUX86State *env = &cpu->env;
1329 struct kvm_regs regs;
1330 int ret = 0;
1331
1332 if (!set) {
1333 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
1334 if (ret < 0) {
1335 return ret;
1336 }
1337 }
1338
1339 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
1340 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
1341 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
1342 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
1343 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
1344 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
1345 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
1346 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
1347 #ifdef TARGET_X86_64
1348 kvm_getput_reg(&regs.r8, &env->regs[8], set);
1349 kvm_getput_reg(&regs.r9, &env->regs[9], set);
1350 kvm_getput_reg(&regs.r10, &env->regs[10], set);
1351 kvm_getput_reg(&regs.r11, &env->regs[11], set);
1352 kvm_getput_reg(&regs.r12, &env->regs[12], set);
1353 kvm_getput_reg(&regs.r13, &env->regs[13], set);
1354 kvm_getput_reg(&regs.r14, &env->regs[14], set);
1355 kvm_getput_reg(&regs.r15, &env->regs[15], set);
1356 #endif
1357
1358 kvm_getput_reg(&regs.rflags, &env->eflags, set);
1359 kvm_getput_reg(&regs.rip, &env->eip, set);
1360
1361 if (set) {
1362 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
1363 }
1364
1365 return ret;
1366 }
1367
1368 static int kvm_put_fpu(X86CPU *cpu)
1369 {
1370 CPUX86State *env = &cpu->env;
1371 struct kvm_fpu fpu;
1372 int i;
1373
1374 memset(&fpu, 0, sizeof fpu);
1375 fpu.fsw = env->fpus & ~(7 << 11);
1376 fpu.fsw |= (env->fpstt & 7) << 11;
1377 fpu.fcw = env->fpuc;
1378 fpu.last_opcode = env->fpop;
1379 fpu.last_ip = env->fpip;
1380 fpu.last_dp = env->fpdp;
1381 for (i = 0; i < 8; ++i) {
1382 fpu.ftwx |= (!env->fptags[i]) << i;
1383 }
1384 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
1385 for (i = 0; i < CPU_NB_REGS; i++) {
1386 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
1387 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
1388 }
1389 fpu.mxcsr = env->mxcsr;
1390
1391 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
1392 }
1393
1394 #define XSAVE_FCW_FSW 0
1395 #define XSAVE_FTW_FOP 1
1396 #define XSAVE_CWD_RIP 2
1397 #define XSAVE_CWD_RDP 4
1398 #define XSAVE_MXCSR 6
1399 #define XSAVE_ST_SPACE 8
1400 #define XSAVE_XMM_SPACE 40
1401 #define XSAVE_XSTATE_BV 128
1402 #define XSAVE_YMMH_SPACE 144
1403 #define XSAVE_BNDREGS 240
1404 #define XSAVE_BNDCSR 256
1405 #define XSAVE_OPMASK 272
1406 #define XSAVE_ZMM_Hi256 288
1407 #define XSAVE_Hi16_ZMM 416
1408 #define XSAVE_PKRU 672
1409
1410 #define XSAVE_BYTE_OFFSET(word_offset) \
1411 ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))
1412
1413 #define ASSERT_OFFSET(word_offset, field) \
1414 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
1415 offsetof(X86XSaveArea, field))
1416
1417 ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
1418 ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
1419 ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
1420 ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
1421 ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
1422 ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
1423 ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
1424 ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
1425 ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
1426 ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
1427 ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
1428 ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
1429 ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
1430 ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
1431 ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
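/* The XSAVE_* word offsets index the u32 region[] of struct kvm_xsave, so
* the byte offset is word_offset * 4. For example XSAVE_YMMH_SPACE (144)
* is byte 576: the 512-byte legacy FXSAVE area plus the 64-byte XSAVE
* header, i.e. the architectural start of the AVX state.
*/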
1432
1433 static int kvm_put_xsave(X86CPU *cpu)
1434 {
1435 CPUX86State *env = &cpu->env;
1436 X86XSaveArea *xsave = env->kvm_xsave_buf;
1437 uint16_t cwd, swd, twd;
1438 int i;
1439
1440 if (!has_xsave) {
1441 return kvm_put_fpu(cpu);
1442 }
1443
1444 memset(xsave, 0, sizeof(struct kvm_xsave));
1445 twd = 0;
1446 swd = env->fpus & ~(7 << 11);
1447 swd |= (env->fpstt & 7) << 11;
1448 cwd = env->fpuc;
1449 for (i = 0; i < 8; ++i) {
1450 twd |= (!env->fptags[i]) << i;
1451 }
1452 xsave->legacy.fcw = cwd;
1453 xsave->legacy.fsw = swd;
1454 xsave->legacy.ftw = twd;
1455 xsave->legacy.fpop = env->fpop;
1456 xsave->legacy.fpip = env->fpip;
1457 xsave->legacy.fpdp = env->fpdp;
1458 memcpy(&xsave->legacy.fpregs, env->fpregs,
1459 sizeof env->fpregs);
1460 xsave->legacy.mxcsr = env->mxcsr;
1461 xsave->header.xstate_bv = env->xstate_bv;
1462 memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
1463 sizeof env->bnd_regs);
1464 xsave->bndcsr_state.bndcsr = env->bndcs_regs;
1465 memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
1466 sizeof env->opmask_regs);
1467
1468 for (i = 0; i < CPU_NB_REGS; i++) {
1469 uint8_t *xmm = xsave->legacy.xmm_regs[i];
1470 uint8_t *ymmh = xsave->avx_state.ymmh[i];
1471 uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
1472 stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
1473 stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
1474 stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
1475 stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
1476 stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
1477 stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
1478 stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
1479 stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
1480 }
1481
1482 #ifdef TARGET_X86_64
1483 memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
1484 16 * sizeof env->xmm_regs[16]);
1485 memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
1486 #endif
1487 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
1488 }
1489
1490 static int kvm_put_xcrs(X86CPU *cpu)
1491 {
1492 CPUX86State *env = &cpu->env;
1493 struct kvm_xcrs xcrs = {};
1494
1495 if (!has_xcrs) {
1496 return 0;
1497 }
1498
1499 xcrs.nr_xcrs = 1;
1500 xcrs.flags = 0;
1501 xcrs.xcrs[0].xcr = 0;
1502 xcrs.xcrs[0].value = env->xcr0;
1503 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
1504 }
1505
1506 static int kvm_put_sregs(X86CPU *cpu)
1507 {
1508 CPUX86State *env = &cpu->env;
1509 struct kvm_sregs sregs;
1510
1511 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
1512 if (env->interrupt_injected >= 0) {
1513 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
1514 (uint64_t)1 << (env->interrupt_injected % 64);
1515 }
1516
1517 if ((env->eflags & VM_MASK)) {
1518 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
1519 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
1520 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
1521 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
1522 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
1523 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
1524 } else {
1525 set_seg(&sregs.cs, &env->segs[R_CS]);
1526 set_seg(&sregs.ds, &env->segs[R_DS]);
1527 set_seg(&sregs.es, &env->segs[R_ES]);
1528 set_seg(&sregs.fs, &env->segs[R_FS]);
1529 set_seg(&sregs.gs, &env->segs[R_GS]);
1530 set_seg(&sregs.ss, &env->segs[R_SS]);
1531 }
1532
1533 set_seg(&sregs.tr, &env->tr);
1534 set_seg(&sregs.ldt, &env->ldt);
1535
1536 sregs.idt.limit = env->idt.limit;
1537 sregs.idt.base = env->idt.base;
1538 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
1539 sregs.gdt.limit = env->gdt.limit;
1540 sregs.gdt.base = env->gdt.base;
1541 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
1542
1543 sregs.cr0 = env->cr[0];
1544 sregs.cr2 = env->cr[2];
1545 sregs.cr3 = env->cr[3];
1546 sregs.cr4 = env->cr[4];
1547
1548 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
1549 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
1550
1551 sregs.efer = env->efer;
1552
1553 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
1554 }
1555
1556 static void kvm_msr_buf_reset(X86CPU *cpu)
1557 {
1558 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
1559 }
1560
1561 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
1562 {
1563 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
1564 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
1565 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
1566
1567 assert((void *)(entry + 1) <= limit);
1568
1569 entry->index = index;
1570 entry->reserved = 0;
1571 entry->data = value;
1572 msrs->nmsrs++;
1573 }
1574
1575 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
1576 {
1577 kvm_msr_buf_reset(cpu);
1578 kvm_msr_entry_add(cpu, index, value);
1579
1580 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
1581 }
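/* KVM_SET_MSRS returns the number of MSRs actually processed, not 0 on
* success; that is why callers check for exactly one entry here (see
* kvm_put_apicbase below) or for nmsrs entries (see kvm_put_msrs).
*/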
1582
1583 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
1584 {
1585 int ret;
1586
1587 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
1588 assert(ret == 1);
1589 }
1590
1591 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
1592 {
1593 CPUX86State *env = &cpu->env;
1594 int ret;
1595
1596 if (!has_msr_tsc_deadline) {
1597 return 0;
1598 }
1599
1600 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
1601 if (ret < 0) {
1602 return ret;
1603 }
1604
1605 assert(ret == 1);
1606 return 0;
1607 }
1608
1609 /*
1610 * Provide a separate write service for the feature control MSR in order to
1611 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
1612 * before writing any other state because forcibly leaving nested mode
1613 * invalidates the VCPU state.
1614 */
1615 static int kvm_put_msr_feature_control(X86CPU *cpu)
1616 {
1617 int ret;
1618
1619 if (!has_msr_feature_control) {
1620 return 0;
1621 }
1622
1623 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
1624 cpu->env.msr_ia32_feature_control);
1625 if (ret < 0) {
1626 return ret;
1627 }
1628
1629 assert(ret == 1);
1630 return 0;
1631 }
1632
1633 static int kvm_put_msrs(X86CPU *cpu, int level)
1634 {
1635 CPUX86State *env = &cpu->env;
1636 int i;
1637 int ret;
1638
1639 kvm_msr_buf_reset(cpu);
1640
1641 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
1642 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
1643 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
1644 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
1645 if (has_msr_star) {
1646 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
1647 }
1648 if (has_msr_hsave_pa) {
1649 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
1650 }
1651 if (has_msr_tsc_aux) {
1652 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
1653 }
1654 if (has_msr_tsc_adjust) {
1655 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
1656 }
1657 if (has_msr_misc_enable) {
1658 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
1659 env->msr_ia32_misc_enable);
1660 }
1661 if (has_msr_smbase) {
1662 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
1663 }
1664 if (has_msr_bndcfgs) {
1665 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
1666 }
1667 if (has_msr_xss) {
1668 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
1669 }
1670 #ifdef TARGET_X86_64
1671 if (lm_capable_kernel) {
1672 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
1673 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
1674 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
1675 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
1676 }
1677 #endif
1678 /*
1679 * The following MSRs have side effects on the guest or are too heavy
1680 * for normal writeback. Limit them to reset or full state updates.
1681 */
1682 if (level >= KVM_PUT_RESET_STATE) {
1683 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
1684 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
1685 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
1686 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
1687 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
1688 }
1689 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
1690 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
1691 }
1692 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
1693 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
1694 }
1695 if (has_msr_architectural_pmu) {
1696 /* Stop the counter. */
1697 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
1698 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
1699
1700 /* Set the counter values. */
1701 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
1702 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
1703 env->msr_fixed_counters[i]);
1704 }
1705 for (i = 0; i < num_architectural_pmu_counters; i++) {
1706 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
1707 env->msr_gp_counters[i]);
1708 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
1709 env->msr_gp_evtsel[i]);
1710 }
1711 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
1712 env->msr_global_status);
1713 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1714 env->msr_global_ovf_ctrl);
1715
1716 /* Now start the PMU. */
1717 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
1718 env->msr_fixed_ctr_ctrl);
1719 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
1720 env->msr_global_ctrl);
1721 }
1722 if (has_msr_hv_hypercall) {
1723 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
1724 env->msr_hv_guest_os_id);
1725 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
1726 env->msr_hv_hypercall);
1727 }
1728 if (cpu->hyperv_vapic) {
1729 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
1730 env->msr_hv_vapic);
1731 }
1732 if (cpu->hyperv_time) {
1733 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
1734 }
1735 if (has_msr_hv_crash) {
1736 int j;
1737
1738 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
1739 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
1740 env->msr_hv_crash_params[j]);
1741
1742 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
1743 HV_X64_MSR_CRASH_CTL_NOTIFY);
1744 }
1745 if (has_msr_hv_runtime) {
1746 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
1747 }
1748 if (cpu->hyperv_synic) {
1749 int j;
1750
1751 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
1752 env->msr_hv_synic_control);
1753 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
1754 env->msr_hv_synic_version);
1755 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
1756 env->msr_hv_synic_evt_page);
1757 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
1758 env->msr_hv_synic_msg_page);
1759
1760 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
1761 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
1762 env->msr_hv_synic_sint[j]);
1763 }
1764 }
1765 if (has_msr_hv_stimer) {
1766 int j;
1767
1768 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
1769 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
1770 env->msr_hv_stimer_config[j]);
1771 }
1772
1773 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
1774 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
1775 env->msr_hv_stimer_count[j]);
1776 }
1777 }
1778 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
1779 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
1780
1781 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
1782 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
1783 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
1784 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
1785 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
1786 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
1787 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
1788 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
1789 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
1790 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
1791 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
1792 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
1793 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
1794 /* The CPU GPs if we write to a bit above the physical limit of
1795 * the host CPU (and KVM emulates that)
1796 */
1797 uint64_t mask = env->mtrr_var[i].mask;
1798 mask &= phys_mask;
1799
1800 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
1801 env->mtrr_var[i].base);
1802 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
1803 }
1804 }
1805
1806 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
1807 * kvm_put_msr_feature_control. */
1808 }
1809 if (env->mcg_cap) {
1810 int i;
1811
1812 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
1813 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
1814 if (has_msr_mcg_ext_ctl) {
1815 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
1816 }
1817 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
1818 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
1819 }
1820 }
1821
1822 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
1823 if (ret < 0) {
1824 return ret;
1825 }
1826
1827 assert(ret == cpu->kvm_msr_buf->nmsrs);
1828 return 0;
1829 }
1830
1831
1832 static int kvm_get_fpu(X86CPU *cpu)
1833 {
1834 CPUX86State *env = &cpu->env;
1835 struct kvm_fpu fpu;
1836 int i, ret;
1837
1838 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
1839 if (ret < 0) {
1840 return ret;
1841 }
1842
1843 env->fpstt = (fpu.fsw >> 11) & 7;
1844 env->fpus = fpu.fsw;
1845 env->fpuc = fpu.fcw;
1846 env->fpop = fpu.last_opcode;
1847 env->fpip = fpu.last_ip;
1848 env->fpdp = fpu.last_dp;
1849 for (i = 0; i < 8; ++i) {
1850 env->fptags[i] = !((fpu.ftwx >> i) & 1);
1851 }
1852 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
1853 for (i = 0; i < CPU_NB_REGS; i++) {
1854 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
1855 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
1856 }
1857 env->mxcsr = fpu.mxcsr;
1858
1859 return 0;
1860 }
1861
1862 static int kvm_get_xsave(X86CPU *cpu)
1863 {
1864 CPUX86State *env = &cpu->env;
1865 X86XSaveArea *xsave = env->kvm_xsave_buf;
1866 int ret, i;
1867 uint16_t cwd, swd, twd;
1868
1869 if (!has_xsave) {
1870 return kvm_get_fpu(cpu);
1871 }
1872
1873 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
1874 if (ret < 0) {
1875 return ret;
1876 }
1877
1878 cwd = xsave->legacy.fcw;
1879 swd = xsave->legacy.fsw;
1880 twd = xsave->legacy.ftw;
1881 env->fpop = xsave->legacy.fpop;
1882 env->fpstt = (swd >> 11) & 7;
1883 env->fpus = swd;
1884 env->fpuc = cwd;
1885 for (i = 0; i < 8; ++i) {
1886 env->fptags[i] = !((twd >> i) & 1);
1887 }
1888 env->fpip = xsave->legacy.fpip;
1889 env->fpdp = xsave->legacy.fpdp;
1890 env->mxcsr = xsave->legacy.mxcsr;
1891 memcpy(env->fpregs, &xsave->legacy.fpregs,
1892 sizeof env->fpregs);
1893 env->xstate_bv = xsave->header.xstate_bv;
1894 memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
1895 sizeof env->bnd_regs);
1896 env->bndcs_regs = xsave->bndcsr_state.bndcsr;
1897 memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
1898 sizeof env->opmask_regs);
1899
1900 for (i = 0; i < CPU_NB_REGS; i++) {
1901 uint8_t *xmm = xsave->legacy.xmm_regs[i];
1902 uint8_t *ymmh = xsave->avx_state.ymmh[i];
1903 uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
1904 env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
1905 env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
1906 env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
1907 env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
1908 env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
1909 env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
1910 env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
1911 env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
1912 }
1913
1914 #ifdef TARGET_X86_64
1915 memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
1916 16 * sizeof env->xmm_regs[16]);
1917 memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
1918 #endif
1919 return 0;
1920 }
1921
1922 static int kvm_get_xcrs(X86CPU *cpu)
1923 {
1924 CPUX86State *env = &cpu->env;
1925 int i, ret;
1926 struct kvm_xcrs xcrs;
1927
1928 if (!has_xcrs) {
1929 return 0;
1930 }
1931
1932 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
1933 if (ret < 0) {
1934 return ret;
1935 }
1936
1937 for (i = 0; i < xcrs.nr_xcrs; i++) {
1938 /* Only support xcr0 now */
1939 if (xcrs.xcrs[i].xcr == 0) {
1940 env->xcr0 = xcrs.xcrs[i].value;
1941 break;
1942 }
1943 }
1944 return 0;
1945 }
1946
1947 static int kvm_get_sregs(X86CPU *cpu)
1948 {
1949 CPUX86State *env = &cpu->env;
1950 struct kvm_sregs sregs;
1951 uint32_t hflags;
1952 int bit, i, ret;
1953
1954 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
1955 if (ret < 0) {
1956 return ret;
1957 }
1958
1959 /* There can only be one pending IRQ set in the bitmap at a time, so try
1960 to find it and save its number instead (-1 for none). */
1961 env->interrupt_injected = -1;
1962 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
1963 if (sregs.interrupt_bitmap[i]) {
1964 bit = ctz64(sregs.interrupt_bitmap[i]);
1965 env->interrupt_injected = i * 64 + bit;
1966 break;
1967 }
1968 }
1969
1970 get_seg(&env->segs[R_CS], &sregs.cs);
1971 get_seg(&env->segs[R_DS], &sregs.ds);
1972 get_seg(&env->segs[R_ES], &sregs.es);
1973 get_seg(&env->segs[R_FS], &sregs.fs);
1974 get_seg(&env->segs[R_GS], &sregs.gs);
1975 get_seg(&env->segs[R_SS], &sregs.ss);
1976
1977 get_seg(&env->tr, &sregs.tr);
1978 get_seg(&env->ldt, &sregs.ldt);
1979
1980 env->idt.limit = sregs.idt.limit;
1981 env->idt.base = sregs.idt.base;
1982 env->gdt.limit = sregs.gdt.limit;
1983 env->gdt.base = sregs.gdt.base;
1984
1985 env->cr[0] = sregs.cr0;
1986 env->cr[2] = sregs.cr2;
1987 env->cr[3] = sregs.cr3;
1988 env->cr[4] = sregs.cr4;
1989
1990 env->efer = sregs.efer;
1991
1992 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
1993
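    /* Recompute hflags: HFLAG_COPY_MASK keeps only the bits that cannot
     * be derived from the state just fetched; everything else is rebuilt
     * below from CR0/CR4, EFER and the segment descriptors.
     */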
1994 #define HFLAG_COPY_MASK \
1995 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
1996 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
1997 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
1998 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
1999
2000 hflags = env->hflags & HFLAG_COPY_MASK;
2001 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
2002 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
2003 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
2004 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
2005 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
2006
2007 if (env->cr[4] & CR4_OSFXSR_MASK) {
2008 hflags |= HF_OSFXSR_MASK;
2009 }
2010
2011 if (env->efer & MSR_EFER_LMA) {
2012 hflags |= HF_LMA_MASK;
2013 }
2014
2015 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
2016 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
2017 } else {
2018 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
2019 (DESC_B_SHIFT - HF_CS32_SHIFT);
2020 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
2021 (DESC_B_SHIFT - HF_SS32_SHIFT);
2022 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
2023 !(hflags & HF_CS32_MASK)) {
2024 hflags |= HF_ADDSEG_MASK;
2025 } else {
2026 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
2027 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
2028 }
2029 }
2030 env->hflags = hflags;
2031
2032 return 0;
2033 }
2034
2035 static int kvm_get_msrs(X86CPU *cpu)
2036 {
2037 CPUX86State *env = &cpu->env;
2038 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
2039 int ret, i;
2040 uint64_t mtrr_top_bits;
2041
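    /* MSRs are read in one batch: queue every index we care about into
     * kvm_msr_buf with a zero placeholder value, issue a single
     * KVM_GET_MSRS ioctl, then decode the returned entries in the loop
     * further down.  On success the ioctl returns the number of MSRs
     * processed, which must match the number we queued.
     */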
2042 kvm_msr_buf_reset(cpu);
2043
2044 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
2045 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
2046 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
2047 kvm_msr_entry_add(cpu, MSR_PAT, 0);
2048 if (has_msr_star) {
2049 kvm_msr_entry_add(cpu, MSR_STAR, 0);
2050 }
2051 if (has_msr_hsave_pa) {
2052 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
2053 }
2054 if (has_msr_tsc_aux) {
2055 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
2056 }
2057 if (has_msr_tsc_adjust) {
2058 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
2059 }
2060 if (has_msr_tsc_deadline) {
2061 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
2062 }
2063 if (has_msr_misc_enable) {
2064 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
2065 }
2066 if (has_msr_smbase) {
2067 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
2068 }
2069 if (has_msr_feature_control) {
2070 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
2071 }
2072 if (has_msr_bndcfgs) {
2073 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
2074 }
2075 if (has_msr_xss) {
2076 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
2077 }
2078
2080 if (!env->tsc_valid) {
2081 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
2082 env->tsc_valid = !runstate_is_running();
2083 }
2084
2085 #ifdef TARGET_X86_64
2086 if (lm_capable_kernel) {
2087 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
2088 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
2089 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
2090 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
2091 }
2092 #endif
2093 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
2094 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
2095 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2096 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
2097 }
2098 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2099 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
2100 }
2101 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2102 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
2103 }
2104 if (has_msr_architectural_pmu) {
2105 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2106 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2107 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
2108 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
2109 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
2110 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
2111 }
2112 for (i = 0; i < num_architectural_pmu_counters; i++) {
2113 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
2114 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
2115 }
2116 }
2117
2118 if (env->mcg_cap) {
2119 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
2120 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
2121 if (has_msr_mcg_ext_ctl) {
2122 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
2123 }
2124 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2125 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
2126 }
2127 }
2128
2129 if (has_msr_hv_hypercall) {
2130 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
2131 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
2132 }
2133 if (cpu->hyperv_vapic) {
2134 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
2135 }
2136 if (cpu->hyperv_time) {
2137 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
2138 }
2139 if (has_msr_hv_crash) {
2140 int j;
2141
2142 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
2143 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
2144 }
2145 }
2146 if (has_msr_hv_runtime) {
2147 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
2148 }
2149 if (cpu->hyperv_synic) {
2150 uint32_t msr;
2151
2152 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
2153 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
2154 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
2155 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
2156 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
2157 kvm_msr_entry_add(cpu, msr, 0);
2158 }
2159 }
2160 if (has_msr_hv_stimer) {
2161 uint32_t msr;
2162
2163 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
2164 msr++) {
2165 kvm_msr_entry_add(cpu, msr, 0);
2166 }
2167 }
2168 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2169 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
2170 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
2171 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
2172 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
2173 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
2174 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
2175 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
2176 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
2177 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
2178 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
2179 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
2180 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
2181 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2182 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
2183 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
2184 }
2185 }
2186
2187 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
2188 if (ret < 0) {
2189 return ret;
2190 }
2191
2192 assert(ret == cpu->kvm_msr_buf->nmsrs);
2193 /*
2194 * MTRR masks: Each mask consists of 5 parts
2195 * a 10..0: must be zero
2196 * b 11: valid bit
2197 * c n-1..12: actual mask bits
2198 * d 51..n: reserved, must be zero
2199 * e 63..52: reserved, must be zero
2200 *
2201 * 'n' is the number of physical bits supported by the CPU and is
2202 * apparently always <= 52. We know our 'n' but don't know what
2203 * the destination's 'n' is; it might be smaller, in which case
2204 * it masks (c) on loading. It might be larger, in which case
2205 * we fill 'd' so that d..c is consistent irrespective of the 'n'
2206 * we're migrating to.
2207 */
2208
2209 if (cpu->fill_mtrr_mask) {
2210 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
2211 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
2212 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
2213 } else {
2214 mtrr_top_bits = 0;
2215 }
2216
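    /* For example, with cpu->phys_bits == 46 (a value chosen purely for
     * illustration), MAKE_64BIT_MASK(46, 6) yields bits 51..46, which the
     * decode loop below ORs into every variable MTRR mask read back.
     */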
2217 for (i = 0; i < ret; i++) {
2218 uint32_t index = msrs[i].index;
2219 switch (index) {
2220 case MSR_IA32_SYSENTER_CS:
2221 env->sysenter_cs = msrs[i].data;
2222 break;
2223 case MSR_IA32_SYSENTER_ESP:
2224 env->sysenter_esp = msrs[i].data;
2225 break;
2226 case MSR_IA32_SYSENTER_EIP:
2227 env->sysenter_eip = msrs[i].data;
2228 break;
2229 case MSR_PAT:
2230 env->pat = msrs[i].data;
2231 break;
2232 case MSR_STAR:
2233 env->star = msrs[i].data;
2234 break;
2235 #ifdef TARGET_X86_64
2236 case MSR_CSTAR:
2237 env->cstar = msrs[i].data;
2238 break;
2239 case MSR_KERNELGSBASE:
2240 env->kernelgsbase = msrs[i].data;
2241 break;
2242 case MSR_FMASK:
2243 env->fmask = msrs[i].data;
2244 break;
2245 case MSR_LSTAR:
2246 env->lstar = msrs[i].data;
2247 break;
2248 #endif
2249 case MSR_IA32_TSC:
2250 env->tsc = msrs[i].data;
2251 break;
2252 case MSR_TSC_AUX:
2253 env->tsc_aux = msrs[i].data;
2254 break;
2255 case MSR_TSC_ADJUST:
2256 env->tsc_adjust = msrs[i].data;
2257 break;
2258 case MSR_IA32_TSCDEADLINE:
2259 env->tsc_deadline = msrs[i].data;
2260 break;
2261 case MSR_VM_HSAVE_PA:
2262 env->vm_hsave = msrs[i].data;
2263 break;
2264 case MSR_KVM_SYSTEM_TIME:
2265 env->system_time_msr = msrs[i].data;
2266 break;
2267 case MSR_KVM_WALL_CLOCK:
2268 env->wall_clock_msr = msrs[i].data;
2269 break;
2270 case MSR_MCG_STATUS:
2271 env->mcg_status = msrs[i].data;
2272 break;
2273 case MSR_MCG_CTL:
2274 env->mcg_ctl = msrs[i].data;
2275 break;
2276 case MSR_MCG_EXT_CTL:
2277 env->mcg_ext_ctl = msrs[i].data;
2278 break;
2279 case MSR_IA32_MISC_ENABLE:
2280 env->msr_ia32_misc_enable = msrs[i].data;
2281 break;
2282 case MSR_IA32_SMBASE:
2283 env->smbase = msrs[i].data;
2284 break;
2285 case MSR_IA32_FEATURE_CONTROL:
2286 env->msr_ia32_feature_control = msrs[i].data;
2287 break;
2288 case MSR_IA32_BNDCFGS:
2289 env->msr_bndcfgs = msrs[i].data;
2290 break;
2291 case MSR_IA32_XSS:
2292 env->xss = msrs[i].data;
2293 break;
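        /* Note: this default label sits before the remaining case labels.
         * That is valid C; it only catches indices matched by no case,
         * i.e. the variable-length MCE bank range at MSR_MC0_CTL.
         */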
2294 default:
2295 if (msrs[i].index >= MSR_MC0_CTL &&
2296 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
2297 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
2298 }
2299 break;
2300 case MSR_KVM_ASYNC_PF_EN:
2301 env->async_pf_en_msr = msrs[i].data;
2302 break;
2303 case MSR_KVM_PV_EOI_EN:
2304 env->pv_eoi_en_msr = msrs[i].data;
2305 break;
2306 case MSR_KVM_STEAL_TIME:
2307 env->steal_time_msr = msrs[i].data;
2308 break;
2309 case MSR_CORE_PERF_FIXED_CTR_CTRL:
2310 env->msr_fixed_ctr_ctrl = msrs[i].data;
2311 break;
2312 case MSR_CORE_PERF_GLOBAL_CTRL:
2313 env->msr_global_ctrl = msrs[i].data;
2314 break;
2315 case MSR_CORE_PERF_GLOBAL_STATUS:
2316 env->msr_global_status = msrs[i].data;
2317 break;
2318 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
2319 env->msr_global_ovf_ctrl = msrs[i].data;
2320 break;
2321 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
2322 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
2323 break;
2324 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
2325 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
2326 break;
2327 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
2328 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
2329 break;
2330 case HV_X64_MSR_HYPERCALL:
2331 env->msr_hv_hypercall = msrs[i].data;
2332 break;
2333 case HV_X64_MSR_GUEST_OS_ID:
2334 env->msr_hv_guest_os_id = msrs[i].data;
2335 break;
2336 case HV_X64_MSR_APIC_ASSIST_PAGE:
2337 env->msr_hv_vapic = msrs[i].data;
2338 break;
2339 case HV_X64_MSR_REFERENCE_TSC:
2340 env->msr_hv_tsc = msrs[i].data;
2341 break;
2342 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2343 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
2344 break;
2345 case HV_X64_MSR_VP_RUNTIME:
2346 env->msr_hv_runtime = msrs[i].data;
2347 break;
2348 case HV_X64_MSR_SCONTROL:
2349 env->msr_hv_synic_control = msrs[i].data;
2350 break;
2351 case HV_X64_MSR_SVERSION:
2352 env->msr_hv_synic_version = msrs[i].data;
2353 break;
2354 case HV_X64_MSR_SIEFP:
2355 env->msr_hv_synic_evt_page = msrs[i].data;
2356 break;
2357 case HV_X64_MSR_SIMP:
2358 env->msr_hv_synic_msg_page = msrs[i].data;
2359 break;
2360 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
2361 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
2362 break;
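        /* The synthetic timer MSRs interleave as CONFIG0, COUNT0, CONFIG1,
         * COUNT1, ..., so dividing the offset by two recovers the timer
         * index in both of the cases below.
         */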
2363 case HV_X64_MSR_STIMER0_CONFIG:
2364 case HV_X64_MSR_STIMER1_CONFIG:
2365 case HV_X64_MSR_STIMER2_CONFIG:
2366 case HV_X64_MSR_STIMER3_CONFIG:
2367 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
2368 msrs[i].data;
2369 break;
2370 case HV_X64_MSR_STIMER0_COUNT:
2371 case HV_X64_MSR_STIMER1_COUNT:
2372 case HV_X64_MSR_STIMER2_COUNT:
2373 case HV_X64_MSR_STIMER3_COUNT:
2374 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
2375 msrs[i].data;
2376 break;
2377 case MSR_MTRRdefType:
2378 env->mtrr_deftype = msrs[i].data;
2379 break;
2380 case MSR_MTRRfix64K_00000:
2381 env->mtrr_fixed[0] = msrs[i].data;
2382 break;
2383 case MSR_MTRRfix16K_80000:
2384 env->mtrr_fixed[1] = msrs[i].data;
2385 break;
2386 case MSR_MTRRfix16K_A0000:
2387 env->mtrr_fixed[2] = msrs[i].data;
2388 break;
2389 case MSR_MTRRfix4K_C0000:
2390 env->mtrr_fixed[3] = msrs[i].data;
2391 break;
2392 case MSR_MTRRfix4K_C8000:
2393 env->mtrr_fixed[4] = msrs[i].data;
2394 break;
2395 case MSR_MTRRfix4K_D0000:
2396 env->mtrr_fixed[5] = msrs[i].data;
2397 break;
2398 case MSR_MTRRfix4K_D8000:
2399 env->mtrr_fixed[6] = msrs[i].data;
2400 break;
2401 case MSR_MTRRfix4K_E0000:
2402 env->mtrr_fixed[7] = msrs[i].data;
2403 break;
2404 case MSR_MTRRfix4K_E8000:
2405 env->mtrr_fixed[8] = msrs[i].data;
2406 break;
2407 case MSR_MTRRfix4K_F0000:
2408 env->mtrr_fixed[9] = msrs[i].data;
2409 break;
2410 case MSR_MTRRfix4K_F8000:
2411 env->mtrr_fixed[10] = msrs[i].data;
2412 break;
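        /* Variable-range MTRR MSRs come in base/mask pairs at even/odd
         * offsets, so bit 0 of the index distinguishes the two.
         */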
2413 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
2414 if (index & 1) {
2415 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
2416 mtrr_top_bits;
2417 } else {
2418 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
2419 }
2420 break;
2421 }
2422 }
2423
2424 return 0;
2425 }
2426
2427 static int kvm_put_mp_state(X86CPU *cpu)
2428 {
2429 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
2430
2431 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
2432 }
2433
2434 static int kvm_get_mp_state(X86CPU *cpu)
2435 {
2436 CPUState *cs = CPU(cpu);
2437 CPUX86State *env = &cpu->env;
2438 struct kvm_mp_state mp_state;
2439 int ret;
2440
2441 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
2442 if (ret < 0) {
2443 return ret;
2444 }
2445 env->mp_state = mp_state.mp_state;
2446 if (kvm_irqchip_in_kernel()) {
2447 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
2448 }
2449 return 0;
2450 }
2451
2452 static int kvm_get_apic(X86CPU *cpu)
2453 {
2454 DeviceState *apic = cpu->apic_state;
2455 struct kvm_lapic_state kapic;
2456 int ret;
2457
2458 if (apic && kvm_irqchip_in_kernel()) {
2459 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
2460 if (ret < 0) {
2461 return ret;
2462 }
2463
2464 kvm_get_apic_state(apic, &kapic);
2465 }
2466 return 0;
2467 }
2468
2469 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
2470 {
2471 CPUState *cs = CPU(cpu);
2472 CPUX86State *env = &cpu->env;
2473 struct kvm_vcpu_events events = {};
2474
2475 if (!kvm_has_vcpu_events()) {
2476 return 0;
2477 }
2478
2479 events.exception.injected = (env->exception_injected >= 0);
2480 events.exception.nr = env->exception_injected;
2481 events.exception.has_error_code = env->has_error_code;
2482 events.exception.error_code = env->error_code;
2483 events.exception.pad = 0;
2484
2485 events.interrupt.injected = (env->interrupt_injected >= 0);
2486 events.interrupt.nr = env->interrupt_injected;
2487 events.interrupt.soft = env->soft_interrupt;
2488
2489 events.nmi.injected = env->nmi_injected;
2490 events.nmi.pending = env->nmi_pending;
2491 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
2492 events.nmi.pad = 0;
2493
2494 events.sipi_vector = env->sipi_vector;
2495 events.flags = 0;
2496
2497 if (has_msr_smbase) {
2498 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
2499 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
2500 if (kvm_irqchip_in_kernel()) {
2501 /* As soon as these are moved to the kernel, remove them
2502 * from cs->interrupt_request.
2503 */
2504 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
2505 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
2506 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
2507 } else {
2508 /* Keep these in cs->interrupt_request. */
2509 events.smi.pending = 0;
2510 events.smi.latched_init = 0;
2511 }
2512 /* Stop SMI delivery on old machine types to avoid a reboot
2513 * on an incoming migration of an old VM.
2514 */
2515 if (!cpu->kvm_no_smi_migration) {
2516 events.flags |= KVM_VCPUEVENT_VALID_SMM;
2517 }
2518 }
2519
2520 if (level >= KVM_PUT_RESET_STATE) {
2521 events.flags |=
2522 KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
2523 }
2524
2525 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
2526 }
2527
2528 static int kvm_get_vcpu_events(X86CPU *cpu)
2529 {
2530 CPUX86State *env = &cpu->env;
2531 struct kvm_vcpu_events events;
2532 int ret;
2533
2534 if (!kvm_has_vcpu_events()) {
2535 return 0;
2536 }
2537
2538 memset(&events, 0, sizeof(events));
2539 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
2540 if (ret < 0) {
2541 return ret;
2542 }
2543 env->exception_injected =
2544 events.exception.injected ? events.exception.nr : -1;
2545 env->has_error_code = events.exception.has_error_code;
2546 env->error_code = events.exception.error_code;
2547
2548 env->interrupt_injected =
2549 events.interrupt.injected ? events.interrupt.nr : -1;
2550 env->soft_interrupt = events.interrupt.soft;
2551
2552 env->nmi_injected = events.nmi.injected;
2553 env->nmi_pending = events.nmi.pending;
2554 if (events.nmi.masked) {
2555 env->hflags2 |= HF2_NMI_MASK;
2556 } else {
2557 env->hflags2 &= ~HF2_NMI_MASK;
2558 }
2559
2560 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
2561 if (events.smi.smm) {
2562 env->hflags |= HF_SMM_MASK;
2563 } else {
2564 env->hflags &= ~HF_SMM_MASK;
2565 }
2566 if (events.smi.pending) {
2567 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2568 } else {
2569 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2570 }
2571 if (events.smi.smm_inside_nmi) {
2572 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
2573 } else {
2574 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
2575 }
2576 if (events.smi.latched_init) {
2577 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2578 } else {
2579 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2580 }
2581 }
2582
2583 env->sipi_vector = events.sipi_vector;
2584
2585 return 0;
2586 }
2587
2588 static int kvm_guest_debug_workarounds(X86CPU *cpu)
2589 {
2590 CPUState *cs = CPU(cpu);
2591 CPUX86State *env = &cpu->env;
2592 int ret = 0;
2593 unsigned long reinject_trap = 0;
2594
2595 if (!kvm_has_vcpu_events()) {
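        /* Vector 1 is #DB (debug) and vector 3 is #BP (breakpoint); these
         * are the only traps SET_GUEST_DEBUG can reinject for us.
         */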
2596 if (env->exception_injected == 1) {
2597 reinject_trap = KVM_GUESTDBG_INJECT_DB;
2598 } else if (env->exception_injected == 3) {
2599 reinject_trap = KVM_GUESTDBG_INJECT_BP;
2600 }
2601 env->exception_injected = -1;
2602 }
2603
2604 /*
2605 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
2606 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
2607 * by updating the debug state once again if single-stepping is on.
2608 * Another reason to call kvm_update_guest_debug here is a pending debug
2609 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
2610 * reinject them via SET_GUEST_DEBUG.
2611 */
2612 if (reinject_trap ||
2613 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
2614 ret = kvm_update_guest_debug(cs, reinject_trap);
2615 }
2616 return ret;
2617 }
2618
2619 static int kvm_put_debugregs(X86CPU *cpu)
2620 {
2621 CPUX86State *env = &cpu->env;
2622 struct kvm_debugregs dbgregs;
2623 int i;
2624
2625 if (!kvm_has_debugregs()) {
2626 return 0;
2627 }
2628
2629 for (i = 0; i < 4; i++) {
2630 dbgregs.db[i] = env->dr[i];
2631 }
2632 dbgregs.dr6 = env->dr[6];
2633 dbgregs.dr7 = env->dr[7];
2634 dbgregs.flags = 0;
2635
2636 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
2637 }
2638
2639 static int kvm_get_debugregs(X86CPU *cpu)
2640 {
2641 CPUX86State *env = &cpu->env;
2642 struct kvm_debugregs dbgregs;
2643 int i, ret;
2644
2645 if (!kvm_has_debugregs()) {
2646 return 0;
2647 }
2648
2649 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
2650 if (ret < 0) {
2651 return ret;
2652 }
2653 for (i = 0; i < 4; i++) {
2654 env->dr[i] = dbgregs.db[i];
2655 }
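    /* DR4 and DR5 alias DR6 and DR7 (when CR4.DE is clear), so keep the
     * aliased slots in sync as well.
     */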
2656 env->dr[4] = env->dr[6] = dbgregs.dr6;
2657 env->dr[5] = env->dr[7] = dbgregs.dr7;
2658
2659 return 0;
2660 }
2661
2662 int kvm_arch_put_registers(CPUState *cpu, int level)
2663 {
2664 X86CPU *x86_cpu = X86_CPU(cpu);
2665 int ret;
2666
2667 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
2668
2669 if (level >= KVM_PUT_RESET_STATE) {
2670 ret = kvm_put_msr_feature_control(x86_cpu);
2671 if (ret < 0) {
2672 return ret;
2673 }
2674 }
2675
2676 if (level == KVM_PUT_FULL_STATE) {
2677 /* We don't check for kvm_arch_set_tsc_khz() errors here,
2678 * because TSC frequency mismatch shouldn't abort migration,
2679 * unless the user explicitly asked for a more strict TSC
2680 * setting (e.g. using an explicit "tsc-freq" option).
2681 */
2682 kvm_arch_set_tsc_khz(cpu);
2683 }
2684
2685 ret = kvm_getput_regs(x86_cpu, 1);
2686 if (ret < 0) {
2687 return ret;
2688 }
2689 ret = kvm_put_xsave(x86_cpu);
2690 if (ret < 0) {
2691 return ret;
2692 }
2693 ret = kvm_put_xcrs(x86_cpu);
2694 if (ret < 0) {
2695 return ret;
2696 }
2697 ret = kvm_put_sregs(x86_cpu);
2698 if (ret < 0) {
2699 return ret;
2700 }
2701 /* must be before kvm_put_msrs */
2702 ret = kvm_inject_mce_oldstyle(x86_cpu);
2703 if (ret < 0) {
2704 return ret;
2705 }
2706 ret = kvm_put_msrs(x86_cpu, level);
2707 if (ret < 0) {
2708 return ret;
2709 }
2710 if (level >= KVM_PUT_RESET_STATE) {
2711 ret = kvm_put_mp_state(x86_cpu);
2712 if (ret < 0) {
2713 return ret;
2714 }
2715 }
2716
2717 ret = kvm_put_tscdeadline_msr(x86_cpu);
2718 if (ret < 0) {
2719 return ret;
2720 }
2721
2722 ret = kvm_put_vcpu_events(x86_cpu, level);
2723 if (ret < 0) {
2724 return ret;
2725 }
2726 ret = kvm_put_debugregs(x86_cpu);
2727 if (ret < 0) {
2728 return ret;
2729 }
2730 /* must be last */
2731 ret = kvm_guest_debug_workarounds(x86_cpu);
2732 if (ret < 0) {
2733 return ret;
2734 }
2735 return 0;
2736 }
2737
2738 int kvm_arch_get_registers(CPUState *cs)
2739 {
2740 X86CPU *cpu = X86_CPU(cs);
2741 int ret;
2742
2743 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
2744
2745 ret = kvm_getput_regs(cpu, 0);
2746 if (ret < 0) {
2747 goto out;
2748 }
2749 ret = kvm_get_xsave(cpu);
2750 if (ret < 0) {
2751 goto out;
2752 }
2753 ret = kvm_get_xcrs(cpu);
2754 if (ret < 0) {
2755 goto out;
2756 }
2757 ret = kvm_get_sregs(cpu);
2758 if (ret < 0) {
2759 goto out;
2760 }
2761 ret = kvm_get_msrs(cpu);
2762 if (ret < 0) {
2763 goto out;
2764 }
2765 ret = kvm_get_mp_state(cpu);
2766 if (ret < 0) {
2767 goto out;
2768 }
2769 ret = kvm_get_apic(cpu);
2770 if (ret < 0) {
2771 goto out;
2772 }
2773 ret = kvm_get_vcpu_events(cpu);
2774 if (ret < 0) {
2775 goto out;
2776 }
2777 ret = kvm_get_debugregs(cpu);
2778 if (ret < 0) {
2779 goto out;
2780 }
2781 ret = 0;
2782 out:
2783 cpu_sync_bndcs_hflags(&cpu->env);
2784 return ret;
2785 }
2786
2787 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
2788 {
2789 X86CPU *x86_cpu = X86_CPU(cpu);
2790 CPUX86State *env = &x86_cpu->env;
2791 int ret;
2792
2793 /* Inject NMI */
2794 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
2795 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
2796 qemu_mutex_lock_iothread();
2797 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
2798 qemu_mutex_unlock_iothread();
2799 DPRINTF("injected NMI\n");
2800 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
2801 if (ret < 0) {
2802 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
2803 strerror(-ret));
2804 }
2805 }
2806 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
2807 qemu_mutex_lock_iothread();
2808 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
2809 qemu_mutex_unlock_iothread();
2810 DPRINTF("injected SMI\n");
2811 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
2812 if (ret < 0) {
2813 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
2814 strerror(-ret));
2815 }
2816 }
2817 }
2818
2819 if (!kvm_pic_in_kernel()) {
2820 qemu_mutex_lock_iothread();
2821 }
2822
2823 /* Force the VCPU out of its inner loop to process any INIT requests
2824 * or (for userspace APIC, but it is cheap to combine the checks here)
2825 * pending TPR access reports.
2826 */
2827 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
2828 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
2829 !(env->hflags & HF_SMM_MASK)) {
2830 cpu->exit_request = 1;
2831 }
2832 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
2833 cpu->exit_request = 1;
2834 }
2835 }
2836
2837 if (!kvm_pic_in_kernel()) {
2838 /* Try to inject an interrupt if the guest can accept it */
2839 if (run->ready_for_interrupt_injection &&
2840 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
2841 (env->eflags & IF_MASK)) {
2842 int irq;
2843
2844 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
2845 irq = cpu_get_pic_interrupt(env);
2846 if (irq >= 0) {
2847 struct kvm_interrupt intr;
2848
2849 intr.irq = irq;
2850 DPRINTF("injected interrupt %d\n", irq);
2851 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
2852 if (ret < 0) {
2853 fprintf(stderr,
2854 "KVM: injection failed, interrupt lost (%s)\n",
2855 strerror(-ret));
2856 }
2857 }
2858 }
2859
2860 /* If we have an interrupt but the guest is not ready to receive an
2861 * interrupt, request an interrupt window exit. This will
2862 * cause a return to userspace as soon as the guest is ready to
2863 * receive interrupts. */
2864 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
2865 run->request_interrupt_window = 1;
2866 } else {
2867 run->request_interrupt_window = 0;
2868 }
2869
2870 DPRINTF("setting tpr\n");
2871 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
2872
2873 qemu_mutex_unlock_iothread();
2874 }
2875 }
2876
2877 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
2878 {
2879 X86CPU *x86_cpu = X86_CPU(cpu);
2880 CPUX86State *env = &x86_cpu->env;
2881
2882 if (run->flags & KVM_RUN_X86_SMM) {
2883 env->hflags |= HF_SMM_MASK;
2884 } else {
2885 env->hflags &= ~HF_SMM_MASK;
2886 }
2887 if (run->if_flag) {
2888 env->eflags |= IF_MASK;
2889 } else {
2890 env->eflags &= ~IF_MASK;
2891 }
2892
2893 /* We need to protect the apic state against concurrent accesses from
2894 * different threads in case the userspace irqchip is used. */
2895 if (!kvm_irqchip_in_kernel()) {
2896 qemu_mutex_lock_iothread();
2897 }
2898 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
2899 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
2900 if (!kvm_irqchip_in_kernel()) {
2901 qemu_mutex_unlock_iothread();
2902 }
2903 return cpu_get_mem_attrs(env);
2904 }
2905
2906 int kvm_arch_process_async_events(CPUState *cs)
2907 {
2908 X86CPU *cpu = X86_CPU(cs);
2909 CPUX86State *env = &cpu->env;
2910
2911 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
2912 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
2913 assert(env->mcg_cap);
2914
2915 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
2916
2917 kvm_cpu_synchronize_state(cs);
2918
2919 if (env->exception_injected == EXCP08_DBLE) {
2920 /* this means triple fault */
2921 qemu_system_reset_request();
2922 cs->exit_request = 1;
2923 return 0;
2924 }
2925 env->exception_injected = EXCP12_MCHK;
2926 env->has_error_code = 0;
2927
2928 cs->halted = 0;
2929 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
2930 env->mp_state = KVM_MP_STATE_RUNNABLE;
2931 }
2932 }
2933
2934 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
2935 !(env->hflags & HF_SMM_MASK)) {
2936 kvm_cpu_synchronize_state(cs);
2937 do_cpu_init(cpu);
2938 }
2939
2940 if (kvm_irqchip_in_kernel()) {
2941 return 0;
2942 }
2943
2944 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2945 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
2946 apic_poll_irq(cpu->apic_state);
2947 }
2948 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2949 (env->eflags & IF_MASK)) ||
2950 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2951 cs->halted = 0;
2952 }
2953 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
2954 kvm_cpu_synchronize_state(cs);
2955 do_cpu_sipi(cpu);
2956 }
2957 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
2958 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
2959 kvm_cpu_synchronize_state(cs);
2960 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
2961 env->tpr_access_type);
2962 }
2963
2964 return cs->halted;
2965 }
2966
2967 static int kvm_handle_halt(X86CPU *cpu)
2968 {
2969 CPUState *cs = CPU(cpu);
2970 CPUX86State *env = &cpu->env;
2971
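    /* Stay runnable if an interrupt or NMI could be delivered right now;
     * otherwise enter the halted state and return EXCP_HLT so the vCPU
     * loop stops calling KVM_RUN until one arrives.
     */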
2972 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2973 (env->eflags & IF_MASK)) &&
2974 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2975 cs->halted = 1;
2976 return EXCP_HLT;
2977 }
2978
2979 return 0;
2980 }
2981
2982 static int kvm_handle_tpr_access(X86CPU *cpu)
2983 {
2984 CPUState *cs = CPU(cpu);
2985 struct kvm_run *run = cs->kvm_run;
2986
2987 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
2988 run->tpr_access.is_write ? TPR_ACCESS_WRITE
2989 : TPR_ACCESS_READ);
2990 return 1;
2991 }
2992
2993 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
2994 {
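    /* 0xcc is the one-byte INT3 opcode.  Save the original byte so the
     * breakpoint can be removed later, then patch INT3 in its place.
     */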
2995 static const uint8_t int3 = 0xcc;
2996
2997 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
2998 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
2999 return -EINVAL;
3000 }
3001 return 0;
3002 }
3003
3004 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
3005 {
3006 uint8_t int3;
3007
3008 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
3009 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
3010 return -EINVAL;
3011 }
3012 return 0;
3013 }
3014
3015 static struct {
3016 target_ulong addr;
3017 int len;
3018 int type;
3019 } hw_breakpoint[4];
3020
3021 static int nb_hw_breakpoint;
3022
3023 static int find_hw_breakpoint(target_ulong addr, int len, int type)
3024 {
3025 int n;
3026
3027 for (n = 0; n < nb_hw_breakpoint; n++) {
3028 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
3029 (hw_breakpoint[n].len == len || len == -1)) {
3030 return n;
3031 }
3032 }
3033 return -1;
3034 }
3035
3036 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
3037 target_ulong len, int type)
3038 {
3039 switch (type) {
3040 case GDB_BREAKPOINT_HW:
3041 len = 1;
3042 break;
3043 case GDB_WATCHPOINT_WRITE:
3044 case GDB_WATCHPOINT_ACCESS:
3045 switch (len) {
3046 case 1:
3047 break;
3048 case 2:
3049 case 4:
3050 case 8:
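            /* x86 requires watchpoints to be naturally aligned to
             * their length.
             */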
3051 if (addr & (len - 1)) {
3052 return -EINVAL;
3053 }
3054 break;
3055 default:
3056 return -EINVAL;
3057 }
3058 break;
3059 default:
3060 return -ENOSYS;
3061 }
3062
3063 if (nb_hw_breakpoint == 4) {
3064 return -ENOBUFS;
3065 }
3066 if (find_hw_breakpoint(addr, len, type) >= 0) {
3067 return -EEXIST;
3068 }
3069 hw_breakpoint[nb_hw_breakpoint].addr = addr;
3070 hw_breakpoint[nb_hw_breakpoint].len = len;
3071 hw_breakpoint[nb_hw_breakpoint].type = type;
3072 nb_hw_breakpoint++;
3073
3074 return 0;
3075 }
3076
3077 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
3078 target_ulong len, int type)
3079 {
3080 int n;
3081
3082 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
3083 if (n < 0) {
3084 return -ENOENT;
3085 }
3086 nb_hw_breakpoint--;
3087 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
3088
3089 return 0;
3090 }
3091
3092 void kvm_arch_remove_all_hw_breakpoints(void)
3093 {
3094 nb_hw_breakpoint = 0;
3095 }
3096
3097 static CPUWatchpoint hw_watchpoint;
3098
3099 static int kvm_handle_debug(X86CPU *cpu,
3100 struct kvm_debug_exit_arch *arch_info)
3101 {
3102 CPUState *cs = CPU(cpu);
3103 CPUX86State *env = &cpu->env;
3104 int ret = 0;
3105 int n;
3106
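    /* Exception vector 1 is #DB.  DR6 bit 14 (BS) flags a single-step
     * trap; bits 0..3 flag which hardware breakpoint fired, and the
     * matching R/W field in DR7 (0 = execute, 1 = write, 3 = read/write)
     * tells us what kind of watchpoint to report.
     */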
3107 if (arch_info->exception == 1) {
3108 if (arch_info->dr6 & (1 << 14)) {
3109 if (cs->singlestep_enabled) {
3110 ret = EXCP_DEBUG;
3111 }
3112 } else {
3113 for (n = 0; n < 4; n++) {
3114 if (arch_info->dr6 & (1 << n)) {
3115 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
3116 case 0x0:
3117 ret = EXCP_DEBUG;
3118 break;
3119 case 0x1:
3120 ret = EXCP_DEBUG;
3121 cs->watchpoint_hit = &hw_watchpoint;
3122 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
3123 hw_watchpoint.flags = BP_MEM_WRITE;
3124 break;
3125 case 0x3:
3126 ret = EXCP_DEBUG;
3127 cs->watchpoint_hit = &hw_watchpoint;
3128 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
3129 hw_watchpoint.flags = BP_MEM_ACCESS;
3130 break;
3131 }
3132 }
3133 }
3134 }
3135 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
3136 ret = EXCP_DEBUG;
3137 }
3138 if (ret == 0) {
3139 cpu_synchronize_state(cs);
3140 assert(env->exception_injected == -1);
3141
3142 /* pass to guest */
3143 env->exception_injected = arch_info->exception;
3144 env->has_error_code = 0;
3145 }
3146
3147 return ret;
3148 }
3149
3150 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
3151 {
3152 const uint8_t type_code[] = {
3153 [GDB_BREAKPOINT_HW] = 0x0,
3154 [GDB_WATCHPOINT_WRITE] = 0x1,
3155 [GDB_WATCHPOINT_ACCESS] = 0x3
3156 };
3157 const uint8_t len_code[] = {
3158 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
3159 };
3160 int n;
3161
3162 if (kvm_sw_breakpoints_active(cpu)) {
3163 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
3164 }
3165 if (nb_hw_breakpoint > 0) {
3166 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
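        /* 0x0600 sets DR7 bit 10 (reserved, must be one) and bit 9 (GE).
         * Each breakpoint then gets its global-enable bit (2 << (n * 2))
         * plus its type and length encoded in the high half of DR7.
         */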
3167 dbg->arch.debugreg[7] = 0x0600;
3168 for (n = 0; n < nb_hw_breakpoint; n++) {
3169 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
3170 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
3171 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
3172 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
3173 }
3174 }
3175 }
3176
3177 static bool host_supports_vmx(void)
3178 {
3179 uint32_t ecx, unused;
3180
3181 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
3182 return ecx & CPUID_EXT_VMX;
3183 }
3184
3185 #define VMX_INVALID_GUEST_STATE 0x80000021
3186
3187 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3188 {
3189 X86CPU *cpu = X86_CPU(cs);
3190 uint64_t code;
3191 int ret;
3192
3193 switch (run->exit_reason) {
3194 case KVM_EXIT_HLT:
3195 DPRINTF("handle_hlt\n");
3196 qemu_mutex_lock_iothread();
3197 ret = kvm_handle_halt(cpu);
3198 qemu_mutex_unlock_iothread();
3199 break;
3200 case KVM_EXIT_SET_TPR:
3201 ret = 0;
3202 break;
3203 case KVM_EXIT_TPR_ACCESS:
3204 qemu_mutex_lock_iothread();
3205 ret = kvm_handle_tpr_access(cpu);
3206 qemu_mutex_unlock_iothread();
3207 break;
3208 case KVM_EXIT_FAIL_ENTRY:
3209 code = run->fail_entry.hardware_entry_failure_reason;
3210 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
3211 code);
3212 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
3213 fprintf(stderr,
3214 "\nIf you're running a guest on an Intel machine without "
3215 "unrestricted mode\n"
3216 "support, the failure can be most likely due to the guest "
3217 "entering an invalid\n"
3218 "state for Intel VT. For example, the guest maybe running "
3219 "in big real mode\n"
3220 "which is not supported on less recent Intel processors."
3221 "\n\n");
3222 }
3223 ret = -1;
3224 break;
3225 case KVM_EXIT_EXCEPTION:
3226 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
3227 run->ex.exception, run->ex.error_code);
3228 ret = -1;
3229 break;
3230 case KVM_EXIT_DEBUG:
3231 DPRINTF("kvm_exit_debug\n");
3232 qemu_mutex_lock_iothread();
3233 ret = kvm_handle_debug(cpu, &run->debug.arch);
3234 qemu_mutex_unlock_iothread();
3235 break;
3236 case KVM_EXIT_HYPERV:
3237 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
3238 break;
3239 case KVM_EXIT_IOAPIC_EOI:
3240 ioapic_eoi_broadcast(run->eoi.vector);
3241 ret = 0;
3242 break;
3243 default:
3244 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
3245 ret = -1;
3246 break;
3247 }
3248
3249 return ret;
3250 }
3251
3252 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
3253 {
3254 X86CPU