a513fdd62d19856c8e2472d9500a98342b4a85db
[qemu.git] / hw / xen / xen_pt.c
1 /*
2 * Copyright (c) 2007, Neocleus Corporation.
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 *
8 * Alex Novik <alex@neocleus.com>
9 * Allen Kay <allen.m.kay@intel.com>
10 * Guy Zana <guy@neocleus.com>
11 *
12 * This file implements direct PCI assignment to a HVM guest
13 */
14
15 /*
16 * Interrupt Disable policy:
17 *
18 * INTx interrupt:
19 * Initialize(register_real_device)
20 * Map INTx(xc_physdev_map_pirq):
21 * <fail>
22 * - Set real Interrupt Disable bit to '1'.
23 * - Set machine_irq and assigned_device->machine_irq to '0'.
24 * * Don't bind INTx.
25 *
26 * Bind INTx(xc_domain_bind_pt_pci_irq):
27 * <fail>
28 * - Set real Interrupt Disable bit to '1'.
29 * - Unmap INTx.
30 * - Decrement xen_pt_mapped_machine_irq[machine_irq]
31 * - Set assigned_device->machine_irq to '0'.
32 *
33 * Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write)
34 * Write '0'
35 * - Set real bit to '0' if assigned_device->machine_irq isn't '0'.
36 *
37 * Write '1'
38 * - Set real bit to '1'.
39 *
40 * MSI interrupt:
41 * Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update)
42 * Bind MSI(xc_domain_update_msi_irq)
43 * <fail>
44 * - Unmap MSI.
45 * - Set dev->msi->pirq to '-1'.
46 *
47 * MSI-X interrupt:
48 * Initialize MSI-X register(xen_pt_msix_update_one)
49 * Bind MSI-X(xc_domain_update_msi_irq)
50 * <fail>
51 * - Unmap MSI-X.
52 * - Set entry->pirq to '-1'.
53 */
54
55 #include "qemu/osdep.h"
56 #include "qapi/error.h"
57 #include <sys/ioctl.h>
58
59 #include "hw/pci/pci.h"
60 #include "hw/qdev-properties.h"
61 #include "hw/qdev-properties-system.h"
62 #include "hw/xen/xen.h"
63 #include "hw/i386/pc.h"
64 #include "hw/xen/xen-legacy-backend.h"
65 #include "xen_pt.h"
66 #include "qemu/range.h"
67 #include "exec/address-spaces.h"
68
/* Set via xen_igd_gfx_pt_set() when IGD graphics passthrough is enabled. */
static bool has_igd_gfx_passthru;
70
71 bool xen_igd_gfx_pt_enabled(void)
72 {
73 return has_igd_gfx_passthru;
74 }
75
76 void xen_igd_gfx_pt_set(bool value, Error **errp)
77 {
78 has_igd_gfx_passthru = value;
79 }
80
#define XEN_PT_NR_IRQS (256)
/* Per-machine-IRQ reference counts, shared by all passed-through devices;
 * a pirq is unmapped only when its count drops back to zero. */
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
83
/*
 * Log a printf-style message to stderr.  When @d is non-NULL the message is
 * prefixed with the device's "[bus:slot.func] " address.
 */
void xen_pt_log(const PCIDevice *d, const char *f, ...)
{
    va_list ap;

    va_start(ap, f);
    if (d) {
        fprintf(stderr, "[%02x:%02x.%d] ", pci_dev_bus_num(d),
                PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
    }
    vfprintf(stderr, f, ap);
    va_end(ap);
}
96
97 /* Config Space */
98
99 static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
100 {
101 /* check offset range */
102 if (addr > 0xFF) {
103 XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
104 "(addr: 0x%02x, len: %d)\n", addr, len);
105 return -1;
106 }
107
108 /* check read size */
109 if ((len != 1) && (len != 2) && (len != 4)) {
110 XEN_PT_ERR(d, "Failed to access register with invalid access length. "
111 "(addr: 0x%02x, len: %d)\n", addr, len);
112 return -1;
113 }
114
115 /* check offset alignment */
116 if (addr & (len - 1)) {
117 XEN_PT_ERR(d, "Failed to access register with invalid access size "
118 "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
119 return -1;
120 }
121
122 return 0;
123 }
124
125 int xen_pt_bar_offset_to_index(uint32_t offset)
126 {
127 int index = 0;
128
129 /* check Exp ROM BAR */
130 if (offset == PCI_ROM_ADDRESS) {
131 return PCI_ROM_SLOT;
132 }
133
134 /* calculate BAR index */
135 index = (offset - PCI_BASE_ADDRESS_0) >> 2;
136 if (index >= PCI_NUM_REGIONS) {
137 return -1;
138 }
139
140 return index;
141 }
142
/*
 * PCIDeviceClass::config_read hook.
 *
 * Reads @len bytes of config space at @addr.  The raw value is first fetched
 * from the real host device; any byte range covered by an emulated register
 * entry is then rewritten by that register's read handler.  Bytes without a
 * register entry pass through unchanged.  Accesses within a 0-Hardwired
 * group always read as 0.
 */
static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    uint32_t val = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    int rc = 0;
    int emul_len = 0;        /* bytes of the access not yet emulated */
    uint32_t find_addr = addr;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        goto exit;
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* no need to emulate, just return 0 */
            val = 0;
            goto exit;
        }
    }

    /* read I/O device register value */
    rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
    if (rc < 0) {
        /* treat an unreadable device as all-ones, like real hardware */
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&val, 0xff, len);
    }

    /* just return the I/O device register value for
     * passthrough type register group */
    if (reg_grp_entry == NULL) {
        goto exit;
    }

    /* adjust the read value to appropriate CFC-CFF window */
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            XenPTRegInfo *reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            /* mask of the bytes of this access the handler may touch,
             * positioned at the register's byte lane within the dword */
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.read) {
                    rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.read) {
                    rc = reg->u.w.read(s, reg_entry,
                                       (uint16_t *)ptr_val, valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.read) {
                    rc = reg->u.dw.read(s, reg_entry,
                                        (uint32_t *)ptr_val, valid_mask);
                }
                break;
            }

            if (rc < 0) {
                /* a failing read handler is an internal bug: bring the
                 * whole VM down rather than return corrupt state */
                xen_shutdown_fatal_error("Internal error: Invalid read "
                                         "emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return 0;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before returning them to pci bus emulator */
    val >>= ((addr & 3) << 3);

exit:
    XEN_PT_LOG_CONFIG(d, addr, val, len);
    return val;
}
246
/*
 * PCIDeviceClass::config_write hook.
 *
 * Writes @len bytes of @val at @addr.  Bytes covered by emulated register
 * entries go through their write handlers, which may modify the value and
 * clear bytes from @wb_mask (the set of bytes still to be written back to
 * the real device).  Remaining writable bytes are then pushed to the host
 * device with xen_host_pci_set_block().  In non-permissive mode, writes to
 * unknown fields are inhibited and a one-time warning is logged.
 */
static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
                                    uint32_t val, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int index = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    int rc = 0;
    /* read_val: current host value; wb_mask: bytes to write back to host */
    uint32_t read_val = 0, wb_mask;
    int emul_len = 0;
    XenPTReg *reg_entry = NULL;
    uint32_t find_addr = addr;
    XenPTRegInfo *reg = NULL;
    bool wp_flag = false;   /* true when part of the write was inhibited */

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        return;
    }

    XEN_PT_LOG_CONFIG(d, addr, val, len);

    /* check unused BAR register */
    index = xen_pt_bar_offset_to_index(addr);
    if ((index >= 0) && (val != 0)) {
        uint32_t chk = val;

        if (index == PCI_ROM_SLOT)
            chk |= (uint32_t)~PCI_ROM_ADDRESS_MASK;

        /* warn on attempts to program an unused BAR (all-ones probes,
         * i.e. BAR sizing, are expected and not warned about) */
        if ((chk != XEN_PT_BAR_ALLF) &&
            (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
            XEN_PT_WARN(d, "Guest attempt to set address to unused "
                        "Base Address Register. (addr: 0x%02x, len: %d)\n",
                        addr, len);
        }
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* ignore silently */
            XEN_PT_WARN(d, "Access to 0-Hardwired register. "
                        "(addr: 0x%02x, len: %d)\n", addr, len);
            return;
        }
    }

    rc = xen_host_pci_get_block(&s->real_device, addr,
                                (uint8_t *)&read_val, len);
    if (rc < 0) {
        /* cannot read back from the device: write nothing back */
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&read_val, 0xff, len);
        wb_mask = 0;
    } else {
        wb_mask = 0xFFFFFFFF >> ((4 - len) << 3);
    }

    /* pass directly to the real device for passthrough type register group */
    if (reg_grp_entry == NULL) {
        if (!s->permissive) {
            wb_mask = 0;
            wp_flag = true;
        }
        goto out;
    }

    memory_region_transaction_begin();
    pci_default_write_config(d, addr, val, len);

    /* adjust the read and write value to appropriate CFC-CFF window */
    read_val <<= (addr & 3) << 3;
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;
            /* bits that must not be written through to the device */
            uint32_t wp_mask = reg->emu_mask | reg->ro_mask;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);
            if (!s->permissive) {
                wp_mask |= reg->res_mask;
            }
            /* if the whole register is write-protected, drop its bytes
             * from the write-back mask */
            if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) {
                wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3))
                             << ((len - emul_len) << 3));
            }

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.write) {
                    rc = reg->u.b.write(s, reg_entry, ptr_val,
                                        read_val >> ((real_offset & 3) << 3),
                                        valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.write) {
                    rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
                                        (read_val >> ((real_offset & 3) << 3)),
                                        valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.write) {
                    rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
                                         (read_val >> ((real_offset & 3) << 3)),
                                         valid_mask);
                }
                break;
            }

            if (rc < 0) {
                /* failing write handler is an internal bug: kill the VM */
                xen_shutdown_fatal_error("Internal error: Invalid write"
                                         " emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            if (!s->permissive) {
                wb_mask &= ~(0xff << ((len - emul_len) << 3));
                /* Unused BARs will make it here, but we don't want to issue
                 * warnings for writes to them (bogus writes get dealt with
                 * above).
                 */
                if (index < 0) {
                    wp_flag = true;
                }
            }
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before passing them to xen_host_pci_set_block. */
    val >>= (addr & 3) << 3;

    memory_region_transaction_commit();

out:
    if (wp_flag && !s->permissive_warned) {
        s->permissive_warned = true;
        xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n",
                   addr, len * 2, wb_mask);
        xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n");
        xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n");
    }
    /* write back each contiguous run of still-writable bytes; index and
     * len are reused here as the run's offset and length */
    for (index = 0; wb_mask; index += len) {
        /* unknown regs are passed through */
        while (!(wb_mask & 0xff)) {
            index++;
            wb_mask >>= 8;
        }
        len = 0;
        do {
            len++;
            wb_mask >>= 8;
        } while (wb_mask & 0xff);
        rc = xen_host_pci_set_block(&s->real_device, addr + index,
                                    (uint8_t *)&val + index, len);

        if (rc < 0) {
            XEN_PT_ERR(d, "xen_host_pci_set_block failed. return value: %d.\n", rc);
        }
    }
}
430
431 /* register regions */
432
433 static uint64_t xen_pt_bar_read(void *o, hwaddr addr,
434 unsigned size)
435 {
436 PCIDevice *d = o;
437 /* if this function is called, that probably means that there is a
438 * misconfiguration of the IOMMU. */
439 XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
440 addr);
441 return 0;
442 }
443 static void xen_pt_bar_write(void *o, hwaddr addr, uint64_t val,
444 unsigned size)
445 {
446 PCIDevice *d = o;
447 /* Same comment as xen_pt_bar_read function */
448 XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
449 addr);
450 }
451
/* Trap-only ops for BAR/ROM regions; accesses are not expected in QEMU. */
static const MemoryRegionOps ops = {
    .endianness = DEVICE_NATIVE_ENDIAN,
    .read = xen_pt_bar_read,
    .write = xen_pt_bar_write,
};
457
/*
 * Register the real device's PIO/MMIO BARs and expansion ROM with QEMU.
 * For each non-empty host region a trap-only MemoryRegion is created and
 * registered as the corresponding BAR; *cmd accumulates the PCI_COMMAND
 * bits (IO/MEMORY) the caller should enable.  Always returns 0.
 */
static int xen_pt_register_regions(XenPCIPassthroughState *s, uint16_t *cmd)
{
    int i = 0;
    XenHostPCIDevice *d = &s->real_device;

    /* Register PIO/MMIO BARs */
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        XenHostPCIIORegion *r = &d->io_regions[i];
        uint8_t type;

        /* skip BARs the host device does not implement */
        if (r->base_addr == 0 || r->size == 0) {
            continue;
        }

        s->bases[i].access.u = r->base_addr;

        if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
            type = PCI_BASE_ADDRESS_SPACE_IO;
            *cmd |= PCI_COMMAND_IO;
        } else {
            type = PCI_BASE_ADDRESS_SPACE_MEMORY;
            if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
                type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
            }
            if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) {
                type |= PCI_BASE_ADDRESS_MEM_TYPE_64;
            }
            *cmd |= PCI_COMMAND_MEMORY;
        }

        memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-bar", r->size);
        pci_register_bar(&s->dev, i, type, &s->bar[i]);

        XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64" type: 0x%x)\n",
                   i, r->size, r->base_addr, type);
    }

    /* Register expansion ROM address */
    if (d->rom.base_addr && d->rom.size) {
        uint32_t bar_data = 0;

        /* Re-set BAR reported by OS, otherwise ROM can't be read. */
        if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
            return 0;
        }
        if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
            bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
            xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
        }

        s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;

        memory_region_init_io(&s->rom, OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-rom", d->rom.size);
        pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
                         &s->rom);

        XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64")\n",
                   d->rom.size, d->rom.base_addr);
    }

    xen_pt_register_vga_regions(d);
    return 0;
}
525
526 /* region mapping */
527
528 static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
529 {
530 int i = 0;
531
532 for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
533 if (mr == &s->bar[i]) {
534 return i;
535 }
536 }
537 if (mr == &s->rom) {
538 return PCI_ROM_SLOT;
539 }
540 return -1;
541 }
542
/*
 * This function checks whether an io_region overlaps an io_region of another
 * device. The io_region to check is described by (addr, size and type).
 * A callback can be provided and will be called for every region that is
 * overlapped.
 * The return value indicates whether the region is overlapped. */
/* Argument/result bundle for xen_pt_check_bar_overlap(). */
struct CheckBarArgs {
    XenPCIPassthroughState *s;  /* device whose region is being checked */
    pcibus_t addr;              /* start address of the region under test */
    pcibus_t size;              /* length of the region under test */
    uint8_t type;               /* region type (PCI_BASE_ADDRESS_SPACE_*) */
    bool rc;                    /* set to true when any overlap is found */
};
556 static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
557 {
558 struct CheckBarArgs *arg = opaque;
559 XenPCIPassthroughState *s = arg->s;
560 uint8_t type = arg->type;
561 int i;
562
563 if (d->devfn == s->dev.devfn) {
564 return;
565 }
566
567 /* xxx: This ignores bridges. */
568 for (i = 0; i < PCI_NUM_REGIONS; i++) {
569 const PCIIORegion *r = &d->io_regions[i];
570
571 if (!r->size) {
572 continue;
573 }
574 if ((type & PCI_BASE_ADDRESS_SPACE_IO)
575 != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
576 continue;
577 }
578
579 if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
580 XEN_PT_WARN(&s->dev,
581 "Overlapped to device [%02x:%02x.%d] Region: %i"
582 " (addr: 0x%"FMT_PCIBUS", len: 0x%"FMT_PCIBUS")\n",
583 pci_bus_num(bus), PCI_SLOT(d->devfn),
584 PCI_FUNC(d->devfn), i, r->addr, r->size);
585 arg->rc = true;
586 }
587 }
588 }
589
/*
 * Add or remove the Xen hardware-domain mapping for one of this device's
 * regions.  Called from the memory/io listeners; @sec describes where the
 * region now sits in the guest address space.  The MSI-X table region is
 * handled specially (remapped rather than mapped 1:1); PIO regions use
 * xc_domain_ioport_mapping(), MMIO regions xc_domain_memory_mapping().
 * Regions that don't belong to this device are ignored.
 */
static void xen_pt_region_update(XenPCIPassthroughState *s,
                                 MemoryRegionSection *sec, bool adding)
{
    PCIDevice *d = &s->dev;
    MemoryRegion *mr = sec->mr;
    int bar = -1;
    int rc;
    int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
    struct CheckBarArgs args = {
        .s = s,
        .addr = sec->offset_within_address_space,
        .size = int128_get64(sec->size),
        .rc = false,
    };

    bar = xen_pt_bar_from_region(s, mr);
    if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
        /* not one of ours */
        return;
    }

    if (s->msix && &s->msix->mmio == mr) {
        if (adding) {
            s->msix->mmio_base_addr = sec->offset_within_address_space;
            /* NOTE(review): the return value of xen_pt_msix_update_remap()
             * is assigned to rc but never checked here */
            rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
        }
        return;
    }

    /* warn if the new location collides with another device's region */
    args.type = d->io_regions[bar].type;
    pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d),
                        xen_pt_check_bar_overlap, &args);
    if (args.rc) {
        XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS
                    ", len: 0x%"FMT_PCIBUS") is overlapped.\n",
                    bar, sec->offset_within_address_space,
                    int128_get64(sec->size));
    }

    if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
        uint32_t guest_port = sec->offset_within_address_space;
        uint32_t machine_port = s->bases[bar].access.pio_base;
        uint32_t size = int128_get64(sec->size);
        rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                      guest_port, machine_port, size,
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s ioport mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    } else {
        pcibus_t guest_addr = sec->offset_within_address_space;
        pcibus_t machine_addr = s->bases[bar].access.maddr
            + sec->offset_within_region;
        pcibus_t size = int128_get64(sec->size);
        rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                      XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(size + XC_PAGE_SIZE - 1),
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s mem mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    }
}
655
656 static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
657 {
658 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
659 memory_listener);
660
661 memory_region_ref(sec->mr);
662 xen_pt_region_update(s, sec, true);
663 }
664
665 static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
666 {
667 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
668 memory_listener);
669
670 xen_pt_region_update(s, sec, false);
671 memory_region_unref(sec->mr);
672 }
673
674 static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec)
675 {
676 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
677 io_listener);
678
679 memory_region_ref(sec->mr);
680 xen_pt_region_update(s, sec, true);
681 }
682
683 static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec)
684 {
685 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
686 io_listener);
687
688 xen_pt_region_update(s, sec, false);
689 memory_region_unref(sec->mr);
690 }
691
/* Listener registered against address_space_memory (see xen_pt_realize). */
static const MemoryListener xen_pt_memory_listener = {
    .region_add = xen_pt_region_add,
    .region_del = xen_pt_region_del,
    .priority = 10,
};
697
/* Listener registered against address_space_io (see xen_pt_realize). */
static const MemoryListener xen_pt_io_listener = {
    .region_add = xen_pt_io_region_add,
    .region_del = xen_pt_io_region_del,
    .priority = 10,
};
703
704 static void
705 xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
706 XenHostPCIDevice *dev)
707 {
708 uint16_t gpu_dev_id;
709 PCIDevice *d = &s->dev;
710
711 gpu_dev_id = dev->device_id;
712 igd_passthrough_isa_bridge_create(pci_get_bus(d), gpu_dev_id);
713 }
714
/*
 * Tear down all passthrough state: unbind and unmap the INTx pirq (ref
 * counted across devices), disable MSI/MSI-X, delete the emulated config
 * registers, unregister VGA regions and listeners, and release the host
 * device.  Safe to call when the host device has already been closed.
 */
static void xen_pt_destroy(PCIDevice *d) {

    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    XenHostPCIDevice *host_dev = &s->real_device;
    uint8_t machine_irq = s->machine_irq;
    uint8_t intx;
    int rc;

    if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) {
        intx = xen_pt_pci_intx(s);
        rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
                                     PT_IRQ_TYPE_PCI,
                                     pci_dev_bus_num(d),
                                     PCI_SLOT(s->dev.devfn),
                                     intx,
                                     0 /* isa_irq */);
        if (rc < 0) {
            /* non-fatal: carry on with the rest of the teardown */
            XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
                       " (machine irq: %i, err: %d)"
                       " But bravely continuing on..\n",
                       'a' + intx, machine_irq, errno);
        }
    }

    /* N.B. xen_pt_config_delete takes care of freeing them. */
    if (s->msi) {
        xen_pt_msi_disable(s);
    }
    if (s->msix) {
        xen_pt_msix_disable(s);
    }

    if (machine_irq) {
        /* only unmap the pirq once no passthrough device still uses it */
        xen_pt_mapped_machine_irq[machine_irq]--;

        if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
            rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);

            if (rc < 0) {
                XEN_PT_ERR(d, "unmapping of interrupt %i failed. (err: %d)"
                           " But bravely continuing on..\n",
                           machine_irq, errno);
            }
        }
        s->machine_irq = 0;
    }

    /* delete all emulated config registers */
    xen_pt_config_delete(s);

    xen_pt_unregister_vga_regions(host_dev);

    if (s->listener_set) {
        memory_listener_unregister(&s->memory_listener);
        memory_listener_unregister(&s->io_listener);
        s->listener_set = false;
    }
    if (!xen_host_pci_device_closed(&s->real_device)) {
        xen_host_pci_device_put(&s->real_device);
    }
}
777 /* init */
778
/*
 * PCIDeviceClass::realize hook.
 *
 * Opens the host device named by the "hostaddr" property, sets up VGA/ISA
 * bridge state for IGD passthrough when applicable, registers BAR regions,
 * initialises the emulated config space, maps and binds the INTx pirq, and
 * finally registers the memory/io listeners.  On failure sets *errp and
 * unwinds via xen_pt_destroy().
 */
static void xen_pt_realize(PCIDevice *d, Error **errp)
{
    ERRP_GUARD();
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int i, rc = 0;
    uint8_t machine_irq = 0, scratch;
    uint16_t cmd = 0;       /* PCI_COMMAND bits to enable on the host device */
    int pirq = XEN_PT_UNASSIGNED_PIRQ;

    /* register real device */
    XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
               " to devfn 0x%x\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
               s->dev.devfn);

    xen_host_pci_device_get(&s->real_device,
                            s->hostaddr.domain, s->hostaddr.bus,
                            s->hostaddr.slot, s->hostaddr.function,
                            errp);
    if (*errp) {
        error_append_hint(errp, "Failed to \"open\" the real pci device");
        return;
    }

    s->is_virtfn = s->real_device.is_virtfn;
    if (s->is_virtfn) {
        XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
                   s->real_device.domain, s->real_device.bus,
                   s->real_device.dev, s->real_device.func);
    }

    /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
    memset(d->config, 0, PCI_CONFIG_SPACE_SIZE);

    s->memory_listener = xen_pt_memory_listener;
    s->io_listener = xen_pt_io_listener;

    /* Setup VGA bios for passthrough GFX */
    if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
        (s->real_device.dev == 2) && (s->real_device.func == 0)) {
        if (!is_igd_vga_passthrough(&s->real_device)) {
            error_setg(errp, "Need to enable igd-passthru if you're trying"
                    " to passthrough IGD GFX");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        xen_pt_setup_vga(s, &s->real_device, errp);
        if (*errp) {
            error_append_hint(errp, "Setup VGA BIOS of passthrough"
                    " GFX failed");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        /* Register ISA bridge for passthrough GFX. */
        xen_igd_passthrough_isa_bridge_create(s, &s->real_device);
    }

    /* Handle real device's MMIO/PIO BARs */
    xen_pt_register_regions(s, &cmd);

    /* reinitialize each config register to be emulated */
    xen_pt_config_init(s, errp);
    if (*errp) {
        error_append_hint(errp, "PCI Config space initialisation failed");
        rc = -1;
        goto err_out;
    }

    /* Bind interrupt */
    rc = xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &scratch);
    if (rc) {
        error_setg_errno(errp, errno, "Failed to read PCI_INTERRUPT_PIN");
        goto err_out;
    }
    if (!scratch) {
        /* device has no INTx pin: nothing to map or bind */
        XEN_PT_LOG(d, "no pin interrupt\n");
        goto out;
    }

    machine_irq = s->real_device.irq;
    if (machine_irq == 0) {
        XEN_PT_LOG(d, "machine irq is 0\n");
        cmd |= PCI_COMMAND_INTX_DISABLE;
        goto out;
    }

    rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
    if (rc < 0) {
        XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n",
                   machine_irq, pirq, errno);

        /* Disable PCI intx assertion (turn on bit10 of devctl) */
        cmd |= PCI_COMMAND_INTX_DISABLE;
        machine_irq = 0;
        s->machine_irq = 0;
    } else {
        machine_irq = pirq;
        s->machine_irq = pirq;
        /* reference-count the pirq so it is unmapped only by the last user */
        xen_pt_mapped_machine_irq[machine_irq]++;
    }

    /* bind machine_irq to device */
    if (machine_irq != 0) {
        uint8_t e_intx = xen_pt_pci_intx(s);

        rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
                                       pci_dev_bus_num(d),
                                       PCI_SLOT(d->devfn),
                                       e_intx);
        if (rc < 0) {
            XEN_PT_ERR(d, "Binding of interrupt %i failed! (err: %d)\n",
                       e_intx, errno);

            /* Disable PCI intx assertion (turn on bit10 of devctl) */
            cmd |= PCI_COMMAND_INTX_DISABLE;
            xen_pt_mapped_machine_irq[machine_irq]--;

            if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
                if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
                    XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!"
                               " (err: %d)\n", machine_irq, errno);
                }
            }
            s->machine_irq = 0;
        }
    }

out:
    /* apply any accumulated PCI_COMMAND bits to the real device */
    if (cmd) {
        uint16_t val;

        rc = xen_host_pci_get_word(&s->real_device, PCI_COMMAND, &val);
        if (rc) {
            error_setg_errno(errp, errno, "Failed to read PCI_COMMAND");
            goto err_out;
        } else {
            val |= cmd;
            rc = xen_host_pci_set_word(&s->real_device, PCI_COMMAND, val);
            if (rc) {
                error_setg_errno(errp, errno, "Failed to write PCI_COMMAND"
                                 " val = 0x%x", val);
                goto err_out;
            }
        }
    }

    memory_listener_register(&s->memory_listener, &address_space_memory);
    memory_listener_register(&s->io_listener, &address_space_io);
    s->listener_set = true;
    XEN_PT_LOG(d,
               "Real physical device %02x:%02x.%d registered successfully\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function);

    return;

err_out:
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        object_unparent(OBJECT(&s->bar[i]));
    }
    object_unparent(OBJECT(&s->rom));

    xen_pt_destroy(d);
    assert(rc);
}
945
/* PCIDeviceClass::exit hook: tear down all passthrough state. */
static void xen_pt_unregister_device(PCIDevice *d)
{
    xen_pt_destroy(d);
}
950
static Property xen_pci_passthrough_properties[] = {
    /* host PCI address of the device to pass through */
    DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
    /* allow write-back of config fields that are not explicitly emulated */
    DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
    DEFINE_PROP_END_OF_LIST(),
};
956
/* Instance init: mark the device as PCI Express capable up front. */
static void xen_pci_passthrough_instance_init(Object *obj)
{
    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
963
964 static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
965 {
966 DeviceClass *dc = DEVICE_CLASS(klass);
967 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
968
969 k->realize = xen_pt_realize;
970 k->exit = xen_pt_unregister_device;
971 k->config_read = xen_pt_pci_read_config;
972 k->config_write = xen_pt_pci_write_config;
973 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
974 dc->desc = "Assign an host PCI device with Xen";
975 device_class_set_props(dc, xen_pci_passthrough_properties);
976 };
977
978 static void xen_pci_passthrough_finalize(Object *obj)
979 {
980 XenPCIPassthroughState *s = XEN_PT_DEVICE(obj);
981
982 xen_pt_msix_delete(s);
983 }
984
/* QOM type registration: implements both conventional PCI and PCIe. */
static const TypeInfo xen_pci_passthrough_info = {
    .name = TYPE_XEN_PT_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(XenPCIPassthroughState),
    .instance_finalize = xen_pci_passthrough_finalize,
    .class_init = xen_pci_passthrough_class_init,
    .instance_init = xen_pci_passthrough_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        { },
    },
};
998
/* Register the xen-pci-passthrough QOM type at module load time. */
static void xen_pci_passthrough_register_types(void)
{
    type_register_static(&xen_pci_passthrough_info);
}

type_init(xen_pci_passthrough_register_types)