accel: Move Xen accelerator code under accel/xen/
[qemu.git] / hw / xen / xen_pt.c
1 /*
2 * Copyright (c) 2007, Neocleus Corporation.
3 * Copyright (c) 2007, Intel Corporation.
4 *
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 *
8 * Alex Novik <alex@neocleus.com>
9 * Allen Kay <allen.m.kay@intel.com>
10 * Guy Zana <guy@neocleus.com>
11 *
12 * This file implements direct PCI assignment to a HVM guest
13 */
14
15 /*
16 * Interrupt Disable policy:
17 *
18 * INTx interrupt:
19 * Initialize(register_real_device)
20 * Map INTx(xc_physdev_map_pirq):
21 * <fail>
22 * - Set real Interrupt Disable bit to '1'.
23 * - Set machine_irq and assigned_device->machine_irq to '0'.
24 * * Don't bind INTx.
25 *
26 * Bind INTx(xc_domain_bind_pt_pci_irq):
27 * <fail>
28 * - Set real Interrupt Disable bit to '1'.
29 * - Unmap INTx.
30 * - Decrement xen_pt_mapped_machine_irq[machine_irq]
31 * - Set assigned_device->machine_irq to '0'.
32 *
33 * Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write)
34 * Write '0'
35 * - Set real bit to '0' if assigned_device->machine_irq isn't '0'.
36 *
37 * Write '1'
38 * - Set real bit to '1'.
39 *
40 * MSI interrupt:
41 * Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update)
42 * Bind MSI(xc_domain_update_msi_irq)
43 * <fail>
44 * - Unmap MSI.
45 * - Set dev->msi->pirq to '-1'.
46 *
47 * MSI-X interrupt:
48 * Initialize MSI-X register(xen_pt_msix_update_one)
49 * Bind MSI-X(xc_domain_update_msi_irq)
50 * <fail>
51 * - Unmap MSI-X.
52 * - Set entry->pirq to '-1'.
53 */
54
55 #include "qemu/osdep.h"
56 #include "qapi/error.h"
57 #include <sys/ioctl.h>
58
59 #include "hw/pci/pci.h"
60 #include "hw/qdev-properties.h"
61 #include "hw/xen/xen.h"
62 #include "hw/i386/pc.h"
63 #include "hw/xen/xen-legacy-backend.h"
64 #include "xen_pt.h"
65 #include "qemu/range.h"
66 #include "exec/address-spaces.h"
67
/* NOTE(review): set outside this file (machine igd-passthru option?) — confirm */
bool has_igd_gfx_passthru;

#define XEN_PT_NR_IRQS (256)
/* Per-pirq reference counts, shared by every passthrough device in this
 * process; incremented in xen_pt_realize() and decremented on destroy. */
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
72
73 void xen_pt_log(const PCIDevice *d, const char *f, ...)
74 {
75 va_list ap;
76
77 va_start(ap, f);
78 if (d) {
79 fprintf(stderr, "[%02x:%02x.%d] ", pci_dev_bus_num(d),
80 PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
81 }
82 vfprintf(stderr, f, ap);
83 va_end(ap);
84 }
85
86 /* Config Space */
87
88 static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
89 {
90 /* check offset range */
91 if (addr > 0xFF) {
92 XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
93 "(addr: 0x%02x, len: %d)\n", addr, len);
94 return -1;
95 }
96
97 /* check read size */
98 if ((len != 1) && (len != 2) && (len != 4)) {
99 XEN_PT_ERR(d, "Failed to access register with invalid access length. "
100 "(addr: 0x%02x, len: %d)\n", addr, len);
101 return -1;
102 }
103
104 /* check offset alignment */
105 if (addr & (len - 1)) {
106 XEN_PT_ERR(d, "Failed to access register with invalid access size "
107 "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
108 return -1;
109 }
110
111 return 0;
112 }
113
114 int xen_pt_bar_offset_to_index(uint32_t offset)
115 {
116 int index = 0;
117
118 /* check Exp ROM BAR */
119 if (offset == PCI_ROM_ADDRESS) {
120 return PCI_ROM_SLOT;
121 }
122
123 /* calculate BAR index */
124 index = (offset - PCI_BASE_ADDRESS_0) >> 2;
125 if (index >= PCI_NUM_REGIONS) {
126 return -1;
127 }
128
129 return index;
130 }
131
/*
 * Guest config-space read handler.
 *
 * Reads @len bytes at @addr from the real device, then walks the bytes
 * of the accessed window and overlays the value of every emulated
 * register that falls inside it.  A hardwired register group always
 * reads as 0, as does an access that fails the sanity check.  A failed
 * host read yields all-ones for the requested bytes.
 */
static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    uint32_t val = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    int rc = 0;
    int emul_len = 0;
    uint32_t find_addr = addr; /* cursor over the accessed byte range */

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        goto exit;
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* no need to emulate, just return 0 */
            val = 0;
            goto exit;
        }
    }

    /* read I/O device register value */
    rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&val, 0xff, len);
    }

    /* just return the I/O device register value for
     * passthrough type register group */
    if (reg_grp_entry == NULL) {
        goto exit;
    }

    /* adjust the read value to appropriate CFC-CFF window */
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            XenPTRegInfo *reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            /* mask of the bytes of this register the guest actually reads */
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.read) {
                    rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.read) {
                    rc = reg->u.w.read(s, reg_entry,
                                       (uint16_t *)ptr_val, valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.read) {
                    rc = reg->u.dw.read(s, reg_entry,
                                        (uint32_t *)ptr_val, valid_mask);
                }
                break;
            }

            /* an emulation failure is fatal for the whole domain */
            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid read "
                                         "emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return 0;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before returning them to pci bus emulator */
    val >>= ((addr & 3) << 3);

exit:
    XEN_PT_LOG_CONFIG(d, addr, val, len);
    return val;
}
235
/*
 * Guest config-space write handler.
 *
 * Emulated registers within the accessed window are dispatched to their
 * per-register write hooks; the hooks and the register masks together
 * decide which bits remain writable (tracked in wb_mask).  Whatever
 * remains writable is then written back to the real device in one or
 * more contiguous chunks.  Writes to unknown registers are inhibited
 * unless the device runs with permissive=on, and the first inhibited
 * write triggers a one-time warning.
 */
static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
                                    uint32_t val, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int index = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    int rc = 0;
    uint32_t read_val = 0, wb_mask;
    int emul_len = 0;
    XenPTReg *reg_entry = NULL;
    uint32_t find_addr = addr; /* cursor over the accessed byte range */
    XenPTRegInfo *reg = NULL;
    bool wp_flag = false; /* true when part of the write was inhibited */

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        return;
    }

    XEN_PT_LOG_CONFIG(d, addr, val, len);

    /* check unused BAR register */
    index = xen_pt_bar_offset_to_index(addr);
    if ((index >= 0) && (val != 0)) {
        uint32_t chk = val;

        if (index == PCI_ROM_SLOT)
            chk |= (uint32_t)~PCI_ROM_ADDRESS_MASK;

        /* writing all-ones is the normal BAR sizing probe, not an error */
        if ((chk != XEN_PT_BAR_ALLF) &&
            (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
            XEN_PT_WARN(d, "Guest attempt to set address to unused "
                        "Base Address Register. (addr: 0x%02x, len: %d)\n",
                        addr, len);
        }
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* ignore silently */
            XEN_PT_WARN(d, "Access to 0-Hardwired register. "
                        "(addr: 0x%02x, len: %d)\n", addr, len);
            return;
        }
    }

    /* fetch the current host value; write hooks need it for merging */
    rc = xen_host_pci_get_block(&s->real_device, addr,
                                (uint8_t *)&read_val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&read_val, 0xff, len);
        wb_mask = 0;
    } else {
        wb_mask = 0xFFFFFFFF >> ((4 - len) << 3);
    }

    /* pass directly to the real device for passthrough type register group */
    if (reg_grp_entry == NULL) {
        if (!s->permissive) {
            wb_mask = 0;
            wp_flag = true;
        }
        goto out;
    }

    memory_region_transaction_begin();
    pci_default_write_config(d, addr, val, len);

    /* adjust the read and write value to appropriate CFC-CFF window */
    read_val <<= (addr & 3) << 3;
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            /* mask of the bytes of this register the guest actually writes */
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;
            /* emulated and read-only bits must not reach the host */
            uint32_t wp_mask = reg->emu_mask | reg->ro_mask;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);
            if (!s->permissive) {
                wp_mask |= reg->res_mask;
            }
            /* fully write-protected register: drop it from the write-back */
            if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) {
                wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3))
                             << ((len - emul_len) << 3));
            }

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.write) {
                    rc = reg->u.b.write(s, reg_entry, ptr_val,
                                        read_val >> ((real_offset & 3) << 3),
                                        valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.write) {
                    rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
                                        (read_val >> ((real_offset & 3) << 3)),
                                        valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.write) {
                    rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
                                         (read_val >> ((real_offset & 3) << 3)),
                                         valid_mask);
                }
                break;
            }

            /* an emulation failure is fatal for the whole domain */
            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid write"
                                         " emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            if (!s->permissive) {
                wb_mask &= ~(0xff << ((len - emul_len) << 3));
                /* Unused BARs will make it here, but we don't want to issue
                 * warnings for writes to them (bogus writes get dealt with
                 * above).
                 */
                if (index < 0) {
                    wp_flag = true;
                }
            }
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before passing them to xen_host_pci_set_block. */
    val >>= (addr & 3) << 3;

    memory_region_transaction_commit();

out:
    if (wp_flag && !s->permissive_warned) {
        s->permissive_warned = true;
        xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n",
                   addr, len * 2, wb_mask);
        xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n");
        xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n");
    }
    /* write back each contiguous run of still-writable bytes; index and
     * len are reused here as chunk offset and chunk length */
    for (index = 0; wb_mask; index += len) {
        /* unknown regs are passed through */
        while (!(wb_mask & 0xff)) {
            index++;
            wb_mask >>= 8;
        }
        len = 0;
        do {
            len++;
            wb_mask >>= 8;
        } while (wb_mask & 0xff);
        rc = xen_host_pci_set_block(&s->real_device, addr + index,
                                    (uint8_t *)&val + index, len);

        if (rc < 0) {
            XEN_PT_ERR(d, "xen_host_pci_set_block failed. return value: %d.\n", rc);
        }
    }
}
419
420 /* register regions */
421
422 static uint64_t xen_pt_bar_read(void *o, hwaddr addr,
423 unsigned size)
424 {
425 PCIDevice *d = o;
426 /* if this function is called, that probably means that there is a
427 * misconfiguration of the IOMMU. */
428 XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
429 addr);
430 return 0;
431 }
432 static void xen_pt_bar_write(void *o, hwaddr addr, uint64_t val,
433 unsigned size)
434 {
435 PCIDevice *d = o;
436 /* Same comment as xen_pt_bar_read function */
437 XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
438 addr);
439 }
440
/* MemoryRegionOps for BAR/ROM regions: both handlers only log stray
 * accesses that should have been routed past QEMU. */
static const MemoryRegionOps ops = {
    .endianness = DEVICE_NATIVE_ENDIAN,
    .read = xen_pt_bar_read,
    .write = xen_pt_bar_write,
};
446
/*
 * Register MemoryRegions for the host device's PIO/MMIO BARs and its
 * expansion ROM.  The PCI_COMMAND_IO / PCI_COMMAND_MEMORY bits needed
 * by the registered regions are OR-ed into *cmd for the caller to
 * apply to the real device later.  Always returns 0 (an unreadable ROM
 * BAR simply ends registration early).
 */
static int xen_pt_register_regions(XenPCIPassthroughState *s, uint16_t *cmd)
{
    int i = 0;
    XenHostPCIDevice *d = &s->real_device;

    /* Register PIO/MMIO BARs */
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        XenHostPCIIORegion *r = &d->io_regions[i];
        uint8_t type;

        /* skip BARs the host device does not implement */
        if (r->base_addr == 0 || r->size == 0) {
            continue;
        }

        s->bases[i].access.u = r->base_addr;

        if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
            type = PCI_BASE_ADDRESS_SPACE_IO;
            *cmd |= PCI_COMMAND_IO;
        } else {
            type = PCI_BASE_ADDRESS_SPACE_MEMORY;
            if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
                type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
            }
            if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) {
                type |= PCI_BASE_ADDRESS_MEM_TYPE_64;
            }
            *cmd |= PCI_COMMAND_MEMORY;
        }

        memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-bar", r->size);
        pci_register_bar(&s->dev, i, type, &s->bar[i]);

        XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64" type: %#x)\n",
                   i, r->size, r->base_addr, type);
    }

    /* Register expansion ROM address */
    if (d->rom.base_addr && d->rom.size) {
        uint32_t bar_data = 0;

        /* Re-set BAR reported by OS, otherwise ROM can't be read. */
        if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
            return 0;
        }
        if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
            bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
            xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
        }

        s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;

        memory_region_init_io(&s->rom, OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-rom", d->rom.size);
        pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
                         &s->rom);

        XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64")\n",
                   d->rom.size, d->rom.base_addr);
    }

    xen_pt_register_vga_regions(d);
    return 0;
}
514
515 /* region mapping */
516
517 static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
518 {
519 int i = 0;
520
521 for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
522 if (mr == &s->bar[i]) {
523 return i;
524 }
525 }
526 if (mr == &s->rom) {
527 return PCI_ROM_SLOT;
528 }
529 return -1;
530 }
531
/*
 * This function checks if an io_region overlaps an io_region from another
 * device.  The io_region to check is provided with (addr, size and type).
 * A callback can be provided and will be called for every region that is
 * overlapped.
 * The return value indicates if the region is overlapped. */
/* Arguments threaded through pci_for_each_device by xen_pt_region_update(). */
struct CheckBarArgs {
    XenPCIPassthroughState *s;  /* the passthrough device being checked */
    pcibus_t addr;              /* start of the region under test */
    pcibus_t size;              /* length of the region under test */
    uint8_t type;               /* PCI_BASE_ADDRESS_SPACE_* of the region */
    bool rc;                    /* set to true when any overlap is found */
};
545 static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
546 {
547 struct CheckBarArgs *arg = opaque;
548 XenPCIPassthroughState *s = arg->s;
549 uint8_t type = arg->type;
550 int i;
551
552 if (d->devfn == s->dev.devfn) {
553 return;
554 }
555
556 /* xxx: This ignores bridges. */
557 for (i = 0; i < PCI_NUM_REGIONS; i++) {
558 const PCIIORegion *r = &d->io_regions[i];
559
560 if (!r->size) {
561 continue;
562 }
563 if ((type & PCI_BASE_ADDRESS_SPACE_IO)
564 != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
565 continue;
566 }
567
568 if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
569 XEN_PT_WARN(&s->dev,
570 "Overlapped to device [%02x:%02x.%d] Region: %i"
571 " (addr: %#"FMT_PCIBUS", len: %#"FMT_PCIBUS")\n",
572 pci_bus_num(bus), PCI_SLOT(d->devfn),
573 PCI_FUNC(d->devfn), i, r->addr, r->size);
574 arg->rc = true;
575 }
576 }
577 }
578
/*
 * Add or remove the Xen hardware-domain mapping for the region in @sec:
 * an ioport mapping for I/O BARs, a p2m memory mapping for MMIO BARs,
 * or an MSI-X table remap when @sec covers the MSI-X MMIO region.
 * @adding selects DPCI_ADD_MAPPING vs DPCI_REMOVE_MAPPING.  Regions not
 * belonging to this device are ignored.
 */
static void xen_pt_region_update(XenPCIPassthroughState *s,
                                 MemoryRegionSection *sec, bool adding)
{
    PCIDevice *d = &s->dev;
    MemoryRegion *mr = sec->mr;
    int bar = -1;
    int rc;
    int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
    struct CheckBarArgs args = {
        .s = s,
        .addr = sec->offset_within_address_space,
        .size = int128_get64(sec->size),
        .rc = false,
    };

    bar = xen_pt_bar_from_region(s, mr);
    if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
        /* not one of ours */
        return;
    }

    if (s->msix && &s->msix->mmio == mr) {
        if (adding) {
            s->msix->mmio_base_addr = sec->offset_within_address_space;
            /* NOTE(review): rc from the remap is not checked here */
            rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
        }
        return;
    }

    /* warn (but proceed) if the region overlaps another device's BAR */
    args.type = d->io_regions[bar].type;
    pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d),
                        xen_pt_check_bar_overlap, &args);
    if (args.rc) {
        XEN_PT_WARN(d, "Region: %d (addr: %#"FMT_PCIBUS
                    ", len: %#"FMT_PCIBUS") is overlapped.\n",
                    bar, sec->offset_within_address_space,
                    int128_get64(sec->size));
    }

    if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
        uint32_t guest_port = sec->offset_within_address_space;
        uint32_t machine_port = s->bases[bar].access.pio_base;
        uint32_t size = int128_get64(sec->size);
        rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                      guest_port, machine_port, size,
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s ioport mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    } else {
        pcibus_t guest_addr = sec->offset_within_address_space;
        pcibus_t machine_addr = s->bases[bar].access.maddr
            + sec->offset_within_region;
        pcibus_t size = int128_get64(sec->size);
        /* round addresses/length up to whole Xen page frames */
        rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                      XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(size + XC_PAGE_SIZE - 1),
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s mem mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    }
}
644
645 static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
646 {
647 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
648 memory_listener);
649
650 memory_region_ref(sec->mr);
651 xen_pt_region_update(s, sec, true);
652 }
653
654 static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
655 {
656 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
657 memory_listener);
658
659 xen_pt_region_update(s, sec, false);
660 memory_region_unref(sec->mr);
661 }
662
663 static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec)
664 {
665 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
666 io_listener);
667
668 memory_region_ref(sec->mr);
669 xen_pt_region_update(s, sec, true);
670 }
671
672 static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec)
673 {
674 XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
675 io_listener);
676
677 xen_pt_region_update(s, sec, false);
678 memory_region_unref(sec->mr);
679 }
680
/* Listener registered on address_space_memory in xen_pt_realize(). */
static const MemoryListener xen_pt_memory_listener = {
    .region_add = xen_pt_region_add,
    .region_del = xen_pt_region_del,
    .priority = 10,
};

/* Listener registered on address_space_io in xen_pt_realize(). */
static const MemoryListener xen_pt_io_listener = {
    .region_add = xen_pt_io_region_add,
    .region_del = xen_pt_io_region_del,
    .priority = 10,
};
692
693 static void
694 xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
695 XenHostPCIDevice *dev)
696 {
697 uint16_t gpu_dev_id;
698 PCIDevice *d = &s->dev;
699
700 gpu_dev_id = dev->device_id;
701 igd_passthrough_isa_bridge_create(pci_get_bus(d), gpu_dev_id);
702 }
703
704 /* destroy. */
705 static void xen_pt_destroy(PCIDevice *d) {
706
707 XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
708 XenHostPCIDevice *host_dev = &s->real_device;
709 uint8_t machine_irq = s->machine_irq;
710 uint8_t intx;
711 int rc;
712
713 if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) {
714 intx = xen_pt_pci_intx(s);
715 rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
716 PT_IRQ_TYPE_PCI,
717 pci_dev_bus_num(d),
718 PCI_SLOT(s->dev.devfn),
719 intx,
720 0 /* isa_irq */);
721 if (rc < 0) {
722 XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
723 " (machine irq: %i, err: %d)"
724 " But bravely continuing on..\n",
725 'a' + intx, machine_irq, errno);
726 }
727 }
728
729 /* N.B. xen_pt_config_delete takes care of freeing them. */
730 if (s->msi) {
731 xen_pt_msi_disable(s);
732 }
733 if (s->msix) {
734 xen_pt_msix_disable(s);
735 }
736
737 if (machine_irq) {
738 xen_pt_mapped_machine_irq[machine_irq]--;
739
740 if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
741 rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);
742
743 if (rc < 0) {
744 XEN_PT_ERR(d, "unmapping of interrupt %i failed. (err: %d)"
745 " But bravely continuing on..\n",
746 machine_irq, errno);
747 }
748 }
749 s->machine_irq = 0;
750 }
751
752 /* delete all emulated config registers */
753 xen_pt_config_delete(s);
754
755 xen_pt_unregister_vga_regions(host_dev);
756
757 if (s->listener_set) {
758 memory_listener_unregister(&s->memory_listener);
759 memory_listener_unregister(&s->io_listener);
760 s->listener_set = false;
761 }
762 if (!xen_host_pci_device_closed(&s->real_device)) {
763 xen_host_pci_device_put(&s->real_device);
764 }
765 }
766 /* init */
767
/*
 * Realize hook for TYPE_XEN_PT_DEVICE.
 *
 * Opens the host PCI device named by the "hostaddr" property, registers
 * its BAR/ROM regions, builds the emulated config space, then maps the
 * machine IRQ to a pirq and binds it to the guest.  PCI_COMMAND bits
 * accumulated along the way (IO/MEMORY/INTX_DISABLE) are applied to the
 * real device at the end.  INTx failures are non-fatal (the device is
 * realized with INTx disabled); other failures tear down partial state
 * via xen_pt_destroy() and report through @errp.
 */
static void xen_pt_realize(PCIDevice *d, Error **errp)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int i, rc = 0;
    uint8_t machine_irq = 0, scratch;
    uint16_t cmd = 0;
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    Error *err = NULL;

    /* register real device */
    XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
               " to devfn %#x\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
               s->dev.devfn);

    xen_host_pci_device_get(&s->real_device,
                            s->hostaddr.domain, s->hostaddr.bus,
                            s->hostaddr.slot, s->hostaddr.function,
                            &err);
    if (err) {
        error_append_hint(&err, "Failed to \"open\" the real pci device");
        error_propagate(errp, err);
        return;
    }

    s->is_virtfn = s->real_device.is_virtfn;
    if (s->is_virtfn) {
        XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
                   s->real_device.domain, s->real_device.bus,
                   s->real_device.dev, s->real_device.func);
    }

    /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
    memset(d->config, 0, PCI_CONFIG_SPACE_SIZE);

    s->memory_listener = xen_pt_memory_listener;
    s->io_listener = xen_pt_io_listener;

    /* Setup VGA bios for passthrough GFX.
     * 0000:00:02.0 is the conventional address of an Intel IGD. */
    if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
        (s->real_device.dev == 2) && (s->real_device.func == 0)) {
        if (!is_igd_vga_passthrough(&s->real_device)) {
            error_setg(errp, "Need to enable igd-passthru if you're trying"
                       " to passthrough IGD GFX");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        xen_pt_setup_vga(s, &s->real_device, &err);
        if (err) {
            error_append_hint(&err, "Setup VGA BIOS of passthrough"
                              " GFX failed");
            error_propagate(errp, err);
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        /* Register ISA bridge for passthrough GFX. */
        xen_igd_passthrough_isa_bridge_create(s, &s->real_device);
    }

    /* Handle real device's MMIO/PIO BARs */
    xen_pt_register_regions(s, &cmd);

    /* reinitialize each config register to be emulated */
    xen_pt_config_init(s, &err);
    if (err) {
        error_append_hint(&err, "PCI Config space initialisation failed");
        error_propagate(errp, err);
        rc = -1;
        goto err_out;
    }

    /* Bind interrupt */
    rc = xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &scratch);
    if (rc) {
        error_setg_errno(errp, errno, "Failed to read PCI_INTERRUPT_PIN");
        goto err_out;
    }
    if (!scratch) {
        /* device has no INTx pin; nothing to map or bind */
        XEN_PT_LOG(d, "no pin interrupt\n");
        goto out;
    }

    machine_irq = s->real_device.irq;
    if (machine_irq == 0) {
        XEN_PT_LOG(d, "machine irq is 0\n");
        cmd |= PCI_COMMAND_INTX_DISABLE;
        goto out;
    }

    rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
    if (rc < 0) {
        XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n",
                   machine_irq, pirq, errno);

        /* Disable PCI intx assertion (turn on bit10 of devctl) */
        cmd |= PCI_COMMAND_INTX_DISABLE;
        machine_irq = 0;
        s->machine_irq = 0;
    } else {
        machine_irq = pirq;
        s->machine_irq = pirq;
        xen_pt_mapped_machine_irq[machine_irq]++;
    }

    /* bind machine_irq to device */
    if (machine_irq != 0) {
        uint8_t e_intx = xen_pt_pci_intx(s);

        rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
                                       pci_dev_bus_num(d),
                                       PCI_SLOT(d->devfn),
                                       e_intx);
        if (rc < 0) {
            XEN_PT_ERR(d, "Binding of interrupt %i failed! (err: %d)\n",
                       e_intx, errno);

            /* Disable PCI intx assertion (turn on bit10 of devctl) */
            cmd |= PCI_COMMAND_INTX_DISABLE;
            xen_pt_mapped_machine_irq[machine_irq]--;

            if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
                if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
                    XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!"
                               " (err: %d)\n", machine_irq, errno);
                }
            }
            s->machine_irq = 0;
        }
    }

out:
    /* apply the PCI_COMMAND bits accumulated above to the real device */
    if (cmd) {
        uint16_t val;

        rc = xen_host_pci_get_word(&s->real_device, PCI_COMMAND, &val);
        if (rc) {
            error_setg_errno(errp, errno, "Failed to read PCI_COMMAND");
            goto err_out;
        } else {
            val |= cmd;
            rc = xen_host_pci_set_word(&s->real_device, PCI_COMMAND, val);
            if (rc) {
                error_setg_errno(errp, errno, "Failed to write PCI_COMMAND"
                                 " val = 0x%x", val);
                goto err_out;
            }
        }
    }

    memory_listener_register(&s->memory_listener, &address_space_memory);
    memory_listener_register(&s->io_listener, &address_space_io);
    s->listener_set = true;
    XEN_PT_LOG(d,
               "Real physical device %02x:%02x.%d registered successfully\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function);

    return;

err_out:
    /* drop the BAR/ROM MemoryRegions created by xen_pt_register_regions() */
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        object_unparent(OBJECT(&s->bar[i]));
    }
    object_unparent(OBJECT(&s->rom));

    xen_pt_destroy(d);
    assert(rc);
}
937
/* qdev exit hook: full teardown of the passthrough device. */
static void xen_pt_unregister_device(PCIDevice *d)
{
    xen_pt_destroy(d);
}
942
static Property xen_pci_passthrough_properties[] = {
    /* host PCI address (domain:bus:slot.function) of the device to assign */
    DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
    /* permissive=on lets unrecognised config bits reach the host (unsafe) */
    DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
    DEFINE_PROP_END_OF_LIST(),
};
948
/* QOM instance_init for TYPE_XEN_PT_DEVICE. */
static void xen_pci_passthrough_instance_init(Object *obj)
{
    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
955
956 static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
957 {
958 DeviceClass *dc = DEVICE_CLASS(klass);
959 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
960
961 k->realize = xen_pt_realize;
962 k->exit = xen_pt_unregister_device;
963 k->config_read = xen_pt_pci_read_config;
964 k->config_write = xen_pt_pci_write_config;
965 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
966 dc->desc = "Assign an host PCI device with Xen";
967 device_class_set_props(dc, xen_pci_passthrough_properties);
968 };
969
/* QOM instance finalizer: free the MSI-X state, if any was allocated. */
static void xen_pci_passthrough_finalize(Object *obj)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(obj);

    xen_pt_msix_delete(s);
}
976
/* QOM type description: a PCI device usable on both conventional PCI
 * and PCIe buses. */
static const TypeInfo xen_pci_passthrough_info = {
    .name = TYPE_XEN_PT_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(XenPCIPassthroughState),
    .instance_finalize = xen_pci_passthrough_finalize,
    .class_init = xen_pci_passthrough_class_init,
    .instance_init = xen_pci_passthrough_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        { },
    },
};
990
/* Register the passthrough device type with QOM at startup. */
static void xen_pci_passthrough_register_types(void)
{
    type_register_static(&xen_pci_passthrough_info);
}

type_init(xen_pci_passthrough_register_types)