hw/vfio/common.c
/*
 * generic functions used by VFIO devices
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/vfio/vfio.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
#include "qapi/error.h"

struct vfio_group_head vfio_group_list =
    QLIST_HEAD_INITIALIZER(vfio_group_list);
struct vfio_as_head vfio_address_spaces =
    QLIST_HEAD_INITIALIZER(vfio_address_spaces);

#ifdef CONFIG_KVM
/*
 * We have a single VFIO pseudo device per KVM VM. Once created it lives
 * for the life of the VM. Closing the file descriptor only drops our
 * reference to it and the device's reference to kvm. Therefore once
 * initialized, this file descriptor is only released on QEMU exit and
 * we'll re-use it should another vfio device be attached before then.
 */
static int vfio_kvm_device_fd = -1;
#endif

/*
 * Common VFIO interrupt disable
 */
void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = index,
        .start = 0,
        .count = 1,
    };

    ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}

/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
void vfio_region_write(void *opaque, hwaddr addr,
                       uint64_t data, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                     ",%d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, data, size);
    }

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    /*
     * A read or write to a BAR always signals an INTx EOI. This will
     * do nothing if not pending (including not in INTx mode). We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt. Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vbasedev->ops->vfio_eoi(vbasedev);
}

uint64_t vfio_region_read(void *opaque,
                          hwaddr addr, unsigned size)
{
    VFIORegion *region = opaque;
    VFIODevice *vbasedev = region->vbasedev;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
        error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, vbasedev->name, region->nr,
                     addr, size);
        return (uint64_t)-1;
    }
    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    /* Same as write above */
    vbasedev->ops->vfio_eoi(vbasedev);

    return data;
}

const MemoryRegionOps vfio_region_ops = {
    .read = vfio_region_read,
    .write = vfio_region_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        error_report("VFIO_UNMAP_DMA: %d", -errno);
        return -errno;
    }

    return 0;
}

static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again. This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    error_report("VFIO_MAP_DMA: %d", -errno);
    return -errno;
}

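/*
 * Record a host IOVA window (a range of guest IOVAs the host IOMMU can
 * map) for this container. Overlapping windows are not supported and are
 * treated as a fatal error.
 */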
static void vfio_host_win_add(VFIOContainer *container,
                              hwaddr min_iova, hwaddr max_iova,
                              uint64_t iova_pgsizes)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (ranges_overlap(hostwin->min_iova,
                           hostwin->max_iova - hostwin->min_iova + 1,
                           min_iova,
                           max_iova - min_iova + 1)) {
            hw_error("%s: Overlapping IOMMU windows are not supported",
                     __func__);
        }
    }

    hostwin = g_malloc0(sizeof(*hostwin));

    hostwin->min_iova = min_iova;
    hostwin->max_iova = max_iova;
    hostwin->iova_pgsizes = iova_pgsizes;
    QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}

static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
                             hwaddr max_iova)
{
    VFIOHostDMAWindow *hostwin;

    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
            QLIST_REMOVE(hostwin, hostwin_next);
            return 0;
        }
    }

    return -1;
}

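/*
 * Return true if this section should not be mapped through VFIO: anything
 * that is neither RAM nor an IOMMU region, plus the spurious high mappings
 * that can appear while an enabled 64-bit BAR is being sized (see the
 * comment below).
 */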
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

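/*
 * Notifier called on guest IOMMU (vIOMMU) IOTLB changes. Translates the
 * IOTLB entry the rest of the way to host virtual memory and mirrors the
 * mapping (or unmapping) into the VFIO container.
 */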
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
    VFIOContainer *container = giommu->container;
    hwaddr iova = iotlb->iova + giommu->iommu_offset;
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    void *vaddr;
    int ret;

    trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask);

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target. We need to translate
     * it the rest of the way through to memory.
     */
    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, iotlb->perm & IOMMU_WO);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non-memory area %"HWADDR_PRIx"",
                     xlat);
        goto out;
    }
    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        goto out;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        vaddr = memory_region_get_ram_ptr(mr) + xlat;
        ret = vfio_dma_map(container, iova,
                           iotlb->addr_mask + 1, vaddr,
                           !(iotlb->perm & IOMMU_WO) || mr->readonly);
        if (ret) {
            error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx", %p) = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1);
        if (ret) {
            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                         "0x%"HWADDR_PRIx") = %d (%m)",
                         container, iova,
                         iotlb->addr_mask + 1, ret);
        }
    }
out:
    rcu_read_unlock();
}

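/*
 * MemoryListener region_add callback: make a newly visible guest memory
 * section DMA-able for the devices in this container, either by registering
 * an IOMMU notifier (for vIOMMU-backed sections) or by mapping the RAM
 * directly with vfio_dma_map().
 */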
static void vfio_listener_region_add(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    VFIOHostDMAWindow *hostwin;
    bool hostwin_found;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        VFIOHostDMAWindow *hostwin;
        hwaddr pgsize = 0;

        /* For now, intersections are not allowed; we may relax this later */
        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
            if (ranges_overlap(hostwin->min_iova,
                               hostwin->max_iova - hostwin->min_iova + 1,
                               section->offset_within_address_space,
                               int128_get64(section->size))) {
                ret = -1;
                goto fail;
            }
        }

        ret = vfio_spapr_create_window(container, section, &pgsize);
        if (ret) {
            goto fail;
        }

        vfio_host_win_add(container, section->offset_within_address_space,
                          section->offset_within_address_space +
                          int128_get64(section->size) - 1, pgsize);
    }

    hostwin_found = false;
    QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
        if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
            hostwin_found = true;
            break;
        }
    }

    if (!hostwin_found) {
        error_report("vfio: IOMMU container %p can't map guest IOVA region"
                     " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
                     container, iova, end);
        ret = -EFAULT;
        goto fail;
    }

    memory_region_ref(section->mr);

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        trace_vfio_listener_region_add_iommu(iova, end);
        /*
         * FIXME: For VFIO iommu types which have KVM acceleration to
         * avoid bouncing all map/unmaps through qemu this way, this
         * would be the right place to wire that up (tell the KVM
         * device emulation the VFIO iommu handles to use).
         */
        giommu = g_malloc0(sizeof(*giommu));
        giommu->iommu = section->mr;
        giommu->iommu_offset = section->offset_within_address_space -
                               section->offset_within_region;
        giommu->container = container;
        giommu->n.notify = vfio_iommu_map_notify;
        giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
        QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);

        memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);

        return;
    }

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vfio_listener_region_add_ram(iova, end, vaddr);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vfio_dma_map(container, iova, int128_get64(llsize),
                       vaddr, section->readonly);
    if (ret) {
        error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx", %p) = %d (%m)",
                     container, iova, int128_get64(llsize), vaddr, ret);
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    if (!container->initialized) {
        if (!container->error) {
            container->error = ret;
        }
    } else {
        hw_error("vfio: DMA mapping failed, unable to continue");
    }
}

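/*
 * MemoryListener region_del callback: tear down whatever region_add set up
 * for this section, unmapping the IOVA range and dropping the reference on
 * the memory region.
 */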
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer, listener);
    hwaddr iova, end;
    Int128 llend, llsize;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        trace_vfio_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    if (memory_region_is_iommu(section->mr)) {
        VFIOGuestIOMMU *giommu;

        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
            if (giommu->iommu == section->mr) {
                memory_region_unregister_iommu_notifier(giommu->iommu,
                                                        &giommu->n);
                QLIST_REMOVE(giommu, giommu_next);
                g_free(giommu);
                break;
            }
        }

        /*
         * FIXME: We assume the one big unmap below is adequate to
         * remove any individual page mappings in the IOMMU which
         * might have been copied into VFIO. This works for a page table
         * based IOMMU where a big unmap flattens a large range of IO-PTEs.
         * That may not be true for all IOMMU types.
         */
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }
    end = int128_get64(int128_sub(llend, int128_one()));

    llsize = int128_sub(llend, int128_make64(iova));

    trace_vfio_listener_region_del(iova, end);

    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, int128_get64(llsize), ret);
    }

    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        vfio_spapr_remove_window(container,
                                 section->offset_within_address_space);
        if (vfio_host_win_del(container,
                              section->offset_within_address_space,
                              section->offset_within_address_space +
                              int128_get64(section->size) - 1) < 0) {
            hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
                     __func__, section->offset_within_address_space);
        }
    }
}

static const MemoryListener vfio_memory_listener = {
    .region_add = vfio_listener_region_add,
    .region_del = vfio_listener_region_del,
};

static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->listener);
    if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
        memory_listener_unregister(&container->prereg_listener);
    }
}

static struct vfio_info_cap_header *
vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
{
    struct vfio_info_cap_header *hdr;
    void *ptr = info;

    if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
        return NULL;
    }

    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
        if (hdr->id == id) {
            return hdr;
        }
    }

    return NULL;
}

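/*
 * Parse the sparse mmap capability of a region, if present, and fill in
 * region->mmaps with only the sub-ranges the kernel allows to be mmap'd.
 * Returns -ENODEV when the region carries no such capability.
 */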
static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
                                          struct vfio_region_info *info)
{
    struct vfio_info_cap_header *hdr;
    struct vfio_region_info_cap_sparse_mmap *sparse;
    int i, j;

    hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
    if (!hdr) {
        return -ENODEV;
    }

    sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                            sparse->areas[i].offset +
                                            sparse->areas[i].size);

        if (sparse->areas[i].size) {
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

    return 0;
}

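/*
 * Query a device region and initialize the VFIORegion wrapper for it,
 * including the MemoryRegion used to expose it to the guest. Callers such
 * as vfio-pci typically pair this with vfio_region_mmap() once the region
 * has been added to the device's address space.
 */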
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                      int index, const char *name)
{
    struct vfio_region_info *info;
    int ret;

    ret = vfio_get_region_info(vbasedev, index, &info);
    if (ret) {
        return ret;
    }

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            ret = vfio_setup_region_sparse_mmaps(region, info);

            if (ret) {
                region->nr_mmaps = 1;
                region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                region->mmaps[0].offset = 0;
                region->mmaps[0].size = region->size;
            }
        }
    }

    g_free(info);

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);
    return 0;
}

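/*
 * mmap the mmap-able sub-ranges of a region and overlay them on the slow
 * I/O MemoryRegion so that guest accesses bypass QEMU. On failure the
 * mappings created so far are rolled back, leaving accesses to go through
 * vfio_region_ops instead.
 */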
int vfio_region_mmap(VFIORegion *region)
{
    int i, prot = 0;
    char *name;

    if (!region->mem) {
        return 0;
    }

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
                                     MAP_SHARED, region->vbasedev->fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            int ret = -errno;

            trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                         region->fd_offset +
                                         region->mmaps[i].offset,
                                         region->fd_offset +
                                         region->mmaps[i].offset +
                                         region->mmaps[i].size - 1, ret);

            region->mmaps[i].mmap = NULL;

            for (i--; i >= 0; i--) {
                memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
                munmap(region->mmaps[i].mmap, region->mmaps[i].size);
                object_unparent(OBJECT(&region->mmaps[i].mem));
                region->mmaps[i].mmap = NULL;
            }

            return ret;
        }

        name = g_strdup_printf("%s mmaps[%d]",
                               memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);
        g_free(name);
        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);
    }

    return 0;
}

void vfio_region_exit(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
        }
    }

    trace_vfio_region_exit(region->vbasedev->name, region->nr);
}

void vfio_region_finalize(VFIORegion *region)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));
        }
    }

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);
}

void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
{
    int i;

    if (!region->mem) {
        return;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);
        }
    }

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
                                        enabled);
}

void vfio_reset_handler(void *opaque)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            vbasedev->ops->vfio_compute_needs_reset(vbasedev);
        }
    }

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->needs_reset) {
                vbasedev->ops->vfio_hot_reset_multi(vbasedev);
            }
        }
    }
}

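/*
 * Register the group with the KVM-VFIO pseudo device so KVM can track
 * which VFIO groups are in use (needed, e.g., for DMA coherency handling
 * on some platforms). A no-op when KVM is not enabled or not compiled in.
 */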
static void vfio_kvm_device_add_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_ADD,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (!kvm_enabled()) {
        return;
    }

    if (vfio_kvm_device_fd < 0) {
        struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_VFIO,
        };

        if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
            error_report("Failed to create KVM VFIO device: %m");
            return;
        }

        vfio_kvm_device_fd = cd.fd;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to add group %d to KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

static void vfio_kvm_device_del_group(VFIOGroup *group)
{
#ifdef CONFIG_KVM
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_DEL,
        .addr = (uint64_t)(unsigned long)&group->fd,
    };

    if (vfio_kvm_device_fd < 0) {
        return;
    }

    if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
        error_report("Failed to remove group %d from KVM VFIO device: %m",
                     group->groupid);
    }
#endif
}

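/*
 * Look up the VFIOAddressSpace wrapping an AddressSpace, creating it on
 * first use. Released again by vfio_put_address_space() once it has no
 * containers left.
 */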
static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
    VFIOAddressSpace *space;

    QLIST_FOREACH(space, &vfio_address_spaces, list) {
        if (space->as == as) {
            return space;
        }
    }

    /* No suitable VFIOAddressSpace, create a new one */
    space = g_malloc0(sizeof(*space));
    space->as = as;
    QLIST_INIT(&space->containers);

    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);

    return space;
}

static void vfio_put_address_space(VFIOAddressSpace *space)
{
    if (QLIST_EMPTY(&space->containers)) {
        QLIST_REMOVE(space, list);
        g_free(space);
    }
}

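/*
 * Attach a group to a container in the given address space. An existing
 * compatible container is reused when the kernel accepts the group;
 * otherwise a new container is opened, an IOMMU model (Type1, Type1v2 or
 * sPAPR TCE) is negotiated, and the memory listener is registered.
 */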
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                  Error **errp)
{
    VFIOContainer *container;
    int ret, fd;
    VFIOAddressSpace *space;

    space = vfio_get_address_space(as);

    QLIST_FOREACH(container, &space->containers, next) {
        if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
            group->container = container;
            QLIST_INSERT_HEAD(&container->group_list, group, container_next);
            return 0;
        }
    }

    fd = qemu_open("/dev/vfio/vfio", O_RDWR);
    if (fd < 0) {
        error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
        ret = -errno;
        goto put_space_exit;
    }

    ret = ioctl(fd, VFIO_GET_API_VERSION);
    if (ret != VFIO_API_VERSION) {
        error_setg(errp, "supported vfio version: %d, "
                   "reported version: %d", VFIO_API_VERSION, ret);
        ret = -EINVAL;
        goto close_fd_exit;
    }

    container = g_malloc0(sizeof(*container));
    container->space = space;
    container->fd = fd;
    if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
        ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
        struct vfio_iommu_type1_info info;

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }

        container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
         * IOVA whatsoever. That's not actually true, but the current
         * kernel interface doesn't tell us what it can map, and the
         * existing Type1 IOMMUs generally support any IOVA we're
         * going to actually try in practice.
         */
        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
        /* Ignore errors */
        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
            /* Assume 4k IOVA page size */
            info.iova_pgsizes = 4096;
        }
        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
    } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
               ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
        struct vfio_iommu_spapr_tce_info info;
        bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);

        ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set group container");
            ret = -errno;
            goto free_container_exit;
        }
        container->iommu_type =
            v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
        ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
        if (ret) {
            error_setg_errno(errp, errno, "failed to set iommu for container");
            ret = -errno;
            goto free_container_exit;
        }

        /*
         * The host kernel code implementing VFIO_IOMMU_DISABLE is called
         * when container fd is closed so we do not call it explicitly
         * in this file.
         */
        if (!v2) {
            ret = ioctl(fd, VFIO_IOMMU_ENABLE);
            if (ret) {
                error_setg_errno(errp, errno, "failed to enable container");
                ret = -errno;
                goto free_container_exit;
            }
        } else {
            container->prereg_listener = vfio_prereg_listener;

            memory_listener_register(&container->prereg_listener,
                                     &address_space_memory);
            if (container->error) {
                memory_listener_unregister(&container->prereg_listener);
                ret = container->error;
                error_setg(errp,
                    "RAM memory listener initialization failed for container");
                goto free_container_exit;
            }
        }

        info.argsz = sizeof(info);
        ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
        if (ret) {
            error_setg_errno(errp, errno,
                             "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
            ret = -errno;
            if (v2) {
                memory_listener_unregister(&container->prereg_listener);
            }
            goto free_container_exit;
        }

        if (v2) {
            /*
             * There is a default window in the just-created container.
             * To make region_add/del simpler, remove this window now
             * and let the iommu_listener callbacks create/remove
             * windows as needed.
             */
            ret = vfio_spapr_remove_window(container, info.dma32_window_start);
            if (ret) {
                error_setg_errno(errp, -ret,
                                 "failed to remove existing window");
                goto free_container_exit;
            }
        } else {
            /* The default table uses 4K pages */
            vfio_host_win_add(container, info.dma32_window_start,
                              info.dma32_window_start +
                              info.dma32_window_size - 1,
                              0x1000);
        }
    } else {
        error_setg(errp, "No available IOMMU models");
        ret = -EINVAL;
        goto free_container_exit;
    }

    container->listener = vfio_memory_listener;

    memory_listener_register(&container->listener, container->space->as);

    if (container->error) {
        ret = container->error;
        error_setg_errno(errp, -ret,
                         "memory listener initialization failed for container");
        goto listener_release_exit;
    }

    container->initialized = true;

    QLIST_INIT(&container->group_list);
    QLIST_INSERT_HEAD(&space->containers, container, next);

    group->container = container;
    QLIST_INSERT_HEAD(&container->group_list, group, container_next);

    return 0;
listener_release_exit:
    vfio_listener_release(container);

free_container_exit:
    g_free(container);

close_fd_exit:
    close(fd);

put_space_exit:
    vfio_put_address_space(space);

    return ret;
}

static void vfio_disconnect_container(VFIOGroup *group)
{
    VFIOContainer *container = group->container;

    if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
        error_report("vfio: error disconnecting group %d from container",
                     group->groupid);
    }

    QLIST_REMOVE(group, container_next);
    group->container = NULL;

    if (QLIST_EMPTY(&container->group_list)) {
        VFIOAddressSpace *space = container->space;
        VFIOGuestIOMMU *giommu, *tmp;

        vfio_listener_release(container);
        QLIST_REMOVE(container, next);

        QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
            memory_region_unregister_iommu_notifier(giommu->iommu, &giommu->n);
            QLIST_REMOVE(giommu, giommu_next);
            g_free(giommu);
        }

        trace_vfio_disconnect_container(container->fd);
        close(container->fd);
        g_free(container);

        vfio_put_address_space(space);
    }
}

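/*
 * Open (or reuse) the VFIO group with the given id, verify it is viable,
 * and connect it to a container for the requested address space. Returns
 * NULL and sets errp on failure.
 */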
VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
    VFIOGroup *group;
    char path[32];
    struct vfio_group_status status = { .argsz = sizeof(status) };

    QLIST_FOREACH(group, &vfio_group_list, next) {
        if (group->groupid == groupid) {
            /* Found it. Now is it already in the right context? */
            if (group->container->space->as == as) {
                return group;
            } else {
                error_setg(errp, "group %d used in multiple address spaces",
                           group->groupid);
                return NULL;
            }
        }
    }

    group = g_malloc0(sizeof(*group));

    snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
    group->fd = qemu_open(path, O_RDWR);
    if (group->fd < 0) {
        error_setg_errno(errp, errno, "failed to open %s", path);
        goto free_group_exit;
    }

    if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
        error_setg_errno(errp, errno, "failed to get group %d status", groupid);
        goto close_fd_exit;
    }

    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        error_setg(errp, "group %d is not viable", groupid);
        error_append_hint(errp,
                          "Please ensure all devices within the iommu_group "
                          "are bound to their vfio bus driver.\n");
        goto close_fd_exit;
    }

    group->groupid = groupid;
    QLIST_INIT(&group->device_list);

    if (vfio_connect_container(group, as, errp)) {
        error_prepend(errp, "failed to setup container for group %d: ",
                      groupid);
        goto close_fd_exit;
    }

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_register_reset(vfio_reset_handler, NULL);
    }

    QLIST_INSERT_HEAD(&vfio_group_list, group, next);

    vfio_kvm_device_add_group(group);

    return group;

close_fd_exit:
    close(group->fd);

free_group_exit:
    g_free(group);

    return NULL;
}

void vfio_put_group(VFIOGroup *group)
{
    if (!group || !QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}

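/*
 * Obtain the device file descriptor for @name from its group and fill in
 * the basic VFIODevice fields (fd, region/irq counts, flags).
 */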
int vfio_get_device(VFIOGroup *group, const char *name,
                    VFIODevice *vbasedev, Error **errp)
{
    struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
    int ret, fd;

    fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
    if (fd < 0) {
        error_setg_errno(errp, errno, "error getting device from group %d",
                         group->groupid);
        error_append_hint(errp,
                          "Verify all devices in group %d are bound to vfio-<bus> "
                          "or pci-stub and not already in use\n", group->groupid);
        return fd;
    }

    ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
    if (ret) {
        error_setg_errno(errp, errno, "error getting device info");
        close(fd);
        return ret;
    }

    vbasedev->fd = fd;
    vbasedev->group = group;
    QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);

    vbasedev->num_irqs = dev_info.num_irqs;
    vbasedev->num_regions = dev_info.num_regions;
    vbasedev->flags = dev_info.flags;

    trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions,
                          dev_info.num_irqs);

    vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
    return 0;
}

void vfio_put_base_device(VFIODevice *vbasedev)
{
    if (!vbasedev->group) {
        return;
    }
    QLIST_REMOVE(vbasedev, next);
    vbasedev->group = NULL;
    trace_vfio_put_base_device(vbasedev->fd);
    close(vbasedev->fd);
}

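/*
 * Retrieve region info from the kernel using the usual VFIO argsz
 * protocol: if the kernel reports a larger argsz than was passed in, the
 * buffer is grown and the ioctl retried so capability chains are captured
 * as well. The caller owns and must g_free() *info.
 */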
int vfio_get_region_info(VFIODevice *vbasedev, int index,
                         struct vfio_region_info **info)
{
    size_t argsz = sizeof(struct vfio_region_info);

    *info = g_malloc0(argsz);

    (*info)->index = index;
retry:
    (*info)->argsz = argsz;

    if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
        g_free(*info);
        *info = NULL;
        return -errno;
    }

    if ((*info)->argsz > argsz) {
        argsz = (*info)->argsz;
        *info = g_realloc(*info, argsz);

        goto retry;
    }

    return 0;
}

int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
                             uint32_t subtype, struct vfio_region_info **info)
{
    int i;

    for (i = 0; i < vbasedev->num_regions; i++) {
        struct vfio_info_cap_header *hdr;
        struct vfio_region_info_cap_type *cap_type;

        if (vfio_get_region_info(vbasedev, i, info)) {
            continue;
        }

        hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
        if (!hdr) {
            g_free(*info);
            continue;
        }

        cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);

        trace_vfio_get_dev_region(vbasedev->name, i,
                                  cap_type->type, cap_type->subtype);

        if (cap_type->type == type && cap_type->subtype == subtype) {
            return 0;
        }

        g_free(*info);
    }

    *info = NULL;
    return -ENODEV;
}

/*
 * Interfaces for IBM EEH (Enhanced Error Handling)
 */
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
    /*
     * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
     * implementation is broken if there are multiple groups in a
     * container. The hardware works in units of Partitionable
     * Endpoints (== IOMMU groups) and the EEH operations naively
     * iterate across all groups in the container, without any logic
     * to make sure the groups have their state synchronized. For
     * certain operations (ENABLE) that might be ok, until an error
     * occurs, but for others (GET_STATE) it's clearly broken.
     */

    /*
     * XXX Once fixed kernels exist, test for them here
     */

    if (QLIST_EMPTY(&container->group_list)) {
        return false;
    }

    if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
        return false;
    }

    return true;
}

static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
{
    struct vfio_eeh_pe_op pe_op = {
        .argsz = sizeof(pe_op),
        .op = op,
    };
    int ret;

    if (!vfio_eeh_container_ok(container)) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x: "
                     "kernel requires a container with exactly one group", op);
        return -EPERM;
    }

    ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
    if (ret < 0) {
        error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
        return -errno;
    }

    return ret;
}

static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
    VFIOAddressSpace *space = vfio_get_address_space(as);
    VFIOContainer *container = NULL;

    if (QLIST_EMPTY(&space->containers)) {
        /* No containers to act on */
        goto out;
    }

    container = QLIST_FIRST(&space->containers);

    if (QLIST_NEXT(container, next)) {
        /*
         * We don't yet have logic to synchronize EEH state across
         * multiple containers.
         */
        container = NULL;
        goto out;
    }

out:
    vfio_put_address_space(space);
    return container;
}

bool vfio_eeh_as_ok(AddressSpace *as)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    return (container != NULL) && vfio_eeh_container_ok(container);
}

int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
{
    VFIOContainer *container = vfio_eeh_as_container(as);

    if (!container) {
        return -ENODEV;
    }
    return vfio_eeh_container_op(container, op);
}