[qemu.git] hw/virtio/vhost.c
1 /*
2 * vhost support
3 *
4 * Copyright Red Hat, Inc. 2010
5 *
6 * Authors:
7 * Michael S. Tsirkin <mst@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "qemu/atomic.h"
20 #include "qemu/range.h"
21 #include "qemu/error-report.h"
22 #include "qemu/memfd.h"
23 #include "standard-headers/linux/vhost_types.h"
24 #include "exec/address-spaces.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/virtio/virtio-access.h"
27 #include "migration/blocker.h"
28 #include "migration/qemu-file-types.h"
29 #include "sysemu/dma.h"
30 #include "sysemu/tcg.h"
31 #include "trace.h"
32
33 /* enabled until disconnected backend stabilizes */
34 #define _VHOST_DEBUG 1
35
36 #ifdef _VHOST_DEBUG
37 #define VHOST_OPS_DEBUG(fmt, ...) \
38 do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
39 strerror(errno), errno); } while (0)
40 #else
41 #define VHOST_OPS_DEBUG(fmt, ...) \
42 do { } while (0)
43 #endif
44
45 static struct vhost_log *vhost_log;
46 static struct vhost_log *vhost_log_shm;
47
48 static unsigned int used_memslots;
49 static QLIST_HEAD(, vhost_dev) vhost_devices =
50 QLIST_HEAD_INITIALIZER(vhost_devices);
51
52 bool vhost_has_free_slot(void)
53 {
54 unsigned int slots_limit = ~0U;
55 struct vhost_dev *hdev;
56
57 QLIST_FOREACH(hdev, &vhost_devices, entry) {
58 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
59 slots_limit = MIN(slots_limit, r);
60 }
61 return slots_limit > used_memslots;
62 }
63
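/*
 * Scan one slice of the dirty log and feed it back into the memory API.
 * [mfirst, mlast] is the guest-physical range of the MemoryRegionSection
 * being synced, [rfirst, rlast] the range covered by one vhost memory
 * region or used ring; only their intersection is walked.  Each log
 * chunk is atomically exchanged with zero so that pages logged
 * concurrently by the backend are not lost, and every set bit is
 * translated into a dirty page within the section's MemoryRegion.
 */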
64 static void vhost_dev_sync_region(struct vhost_dev *dev,
65 MemoryRegionSection *section,
66 uint64_t mfirst, uint64_t mlast,
67 uint64_t rfirst, uint64_t rlast)
68 {
69 vhost_log_chunk_t *log = dev->log->log;
70
71 uint64_t start = MAX(mfirst, rfirst);
72 uint64_t end = MIN(mlast, rlast);
73 vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
74 vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
75 uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
76
77 if (end < start) {
78 return;
79 }
80 assert(end / VHOST_LOG_CHUNK < dev->log_size);
81 assert(start / VHOST_LOG_CHUNK < dev->log_size);
82
83 for (;from < to; ++from) {
84 vhost_log_chunk_t log;
85 /* We first check with a non-atomic load: much cheaper,
86 * and we expect non-dirty to be the common case. */
87 if (!*from) {
88 addr += VHOST_LOG_CHUNK;
89 continue;
90 }
91 /* Data must be read atomically. We don't really need barrier semantics
92 * but it's easier to use atomic_* than to roll our own. */
93 log = atomic_xchg(from, 0);
94 while (log) {
95 int bit = ctzl(log);
96 hwaddr page_addr;
97 hwaddr section_offset;
98 hwaddr mr_offset;
99 page_addr = addr + bit * VHOST_LOG_PAGE;
100 section_offset = page_addr - section->offset_within_address_space;
101 mr_offset = section_offset + section->offset_within_region;
102 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
103 log &= ~(0x1ull << bit);
104 }
105 addr += VHOST_LOG_CHUNK;
106 }
107 }
108
109 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
110 MemoryRegionSection *section,
111 hwaddr first,
112 hwaddr last)
113 {
114 int i;
115 hwaddr start_addr;
116 hwaddr end_addr;
117
118 if (!dev->log_enabled || !dev->started) {
119 return 0;
120 }
121 start_addr = section->offset_within_address_space;
122 end_addr = range_get_last(start_addr, int128_get64(section->size));
123 start_addr = MAX(first, start_addr);
124 end_addr = MIN(last, end_addr);
125
126 for (i = 0; i < dev->mem->nregions; ++i) {
127 struct vhost_memory_region *reg = dev->mem->regions + i;
128 vhost_dev_sync_region(dev, section, start_addr, end_addr,
129 reg->guest_phys_addr,
130 range_get_last(reg->guest_phys_addr,
131 reg->memory_size));
132 }
133 for (i = 0; i < dev->nvqs; ++i) {
134 struct vhost_virtqueue *vq = dev->vqs + i;
135
136 if (!vq->used_phys && !vq->used_size) {
137 continue;
138 }
139
140 vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
141 range_get_last(vq->used_phys, vq->used_size));
142 }
143 return 0;
144 }
145
146 static void vhost_log_sync(MemoryListener *listener,
147 MemoryRegionSection *section)
148 {
149 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
150 memory_listener);
151 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
152 }
153
154 static void vhost_log_sync_range(struct vhost_dev *dev,
155 hwaddr first, hwaddr last)
156 {
157 int i;
158 /* FIXME: this is N^2 in number of sections */
159 for (i = 0; i < dev->n_mem_sections; ++i) {
160 MemoryRegionSection *section = &dev->mem_sections[i];
161 vhost_sync_dirty_bitmap(dev, section, first, last);
162 }
163 }
164
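/*
 * Size the dirty log so it has one bit per VHOST_LOG_PAGE for every
 * guest-physical address the backend may write to: all regions in
 * dev->mem plus each virtqueue's used ring.  The returned value is the
 * number of vhost_log_chunk_t entries needed to cover the highest such
 * address (with the usual 4 KiB log page and 64-bit chunks, one chunk
 * covers 256 KiB of guest memory).
 */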
165 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
166 {
167 uint64_t log_size = 0;
168 int i;
169 for (i = 0; i < dev->mem->nregions; ++i) {
170 struct vhost_memory_region *reg = dev->mem->regions + i;
171 uint64_t last = range_get_last(reg->guest_phys_addr,
172 reg->memory_size);
173 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
174 }
175 for (i = 0; i < dev->nvqs; ++i) {
176 struct vhost_virtqueue *vq = dev->vqs + i;
177
178 if (!vq->used_phys && !vq->used_size) {
179 continue;
180 }
181
182 uint64_t last = vq->used_phys + vq->used_size - 1;
183 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
184 }
185 return log_size;
186 }
187
188 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
189 {
190 Error *err = NULL;
191 struct vhost_log *log;
192 uint64_t logsize = size * sizeof(*(log->log));
193 int fd = -1;
194
195 log = g_new0(struct vhost_log, 1);
196 if (share) {
197 log->log = qemu_memfd_alloc("vhost-log", logsize,
198 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
199 &fd, &err);
200 if (err) {
201 error_report_err(err);
202 g_free(log);
203 return NULL;
204 }
205 memset(log->log, 0, logsize);
206 } else {
207 log->log = g_malloc0(logsize);
208 }
209
210 log->size = size;
211 log->refcnt = 1;
212 log->fd = fd;
213
214 return log;
215 }
216
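/*
 * Dirty logs are shared across vhost devices: one cached buffer for
 * plain malloc'd logs and one memfd-backed buffer for backends that
 * require a shareable log (e.g. vhost-user).  The cached log is reused
 * only when the requested size matches exactly; otherwise a fresh log
 * is allocated and becomes the new cached entry.  Callers drop their
 * reference with vhost_log_put().
 */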
217 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
218 {
219 struct vhost_log *log = share ? vhost_log_shm : vhost_log;
220
221 if (!log || log->size != size) {
222 log = vhost_log_alloc(size, share);
223 if (share) {
224 vhost_log_shm = log;
225 } else {
226 vhost_log = log;
227 }
228 } else {
229 ++log->refcnt;
230 }
231
232 return log;
233 }
234
235 static void vhost_log_put(struct vhost_dev *dev, bool sync)
236 {
237 struct vhost_log *log = dev->log;
238
239 if (!log) {
240 return;
241 }
242
243 --log->refcnt;
244 if (log->refcnt == 0) {
245 /* Sync only the range covered by the old log */
246 if (dev->log_size && sync) {
247 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
248 }
249
250 if (vhost_log == log) {
251 g_free(log->log);
252 vhost_log = NULL;
253 } else if (vhost_log_shm == log) {
254 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
255 log->fd);
256 vhost_log_shm = NULL;
257 }
258
259 g_free(log);
260 }
261
262 dev->log = NULL;
263 dev->log_size = 0;
264 }
265
266 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
267 {
268 return dev->vhost_ops->vhost_requires_shm_log &&
269 dev->vhost_ops->vhost_requires_shm_log(dev);
270 }
271
272 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
273 {
274 struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
275 uint64_t log_base = (uintptr_t)log->log;
276 int r;
277
278 /* inform backend of log switching; this must be done before
279 releasing the current log, to ensure no logging is lost */
280 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
281 if (r < 0) {
282 VHOST_OPS_DEBUG("vhost_set_log_base failed");
283 }
284
285 vhost_log_put(dev, true);
286 dev->log = log;
287 dev->log_size = size;
288 }
289
290 static int vhost_dev_has_iommu(struct vhost_dev *dev)
291 {
292 VirtIODevice *vdev = dev->vdev;
293
294 /*
295 * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports the
296 * incremental memory mapping API via the IOTLB API. On platforms
297 * without an IOMMU there is no need to enable this feature, which
298 * would only cause unnecessary IOTLB miss/update transactions.
299 */
300 return vdev->dma_as != &address_space_memory &&
301 virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
302 }
303
304 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
305 hwaddr *plen, bool is_write)
306 {
307 if (!vhost_dev_has_iommu(dev)) {
308 return cpu_physical_memory_map(addr, plen, is_write);
309 } else {
310 return (void *)(uintptr_t)addr;
311 }
312 }
313
314 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
315 hwaddr len, int is_write,
316 hwaddr access_len)
317 {
318 if (!vhost_dev_has_iommu(dev)) {
319 cpu_physical_memory_unmap(buffer, len, is_write, access_len);
320 }
321 }
322
323 static int vhost_verify_ring_part_mapping(void *ring_hva,
324 uint64_t ring_gpa,
325 uint64_t ring_size,
326 void *reg_hva,
327 uint64_t reg_gpa,
328 uint64_t reg_size)
329 {
330 uint64_t hva_ring_offset;
331 uint64_t ring_last = range_get_last(ring_gpa, ring_size);
332 uint64_t reg_last = range_get_last(reg_gpa, reg_size);
333
334 if (ring_last < reg_gpa || ring_gpa > reg_last) {
335 return 0;
336 }
337 /* check that the whole ring is mapped */
338 if (ring_last > reg_last) {
339 return -ENOMEM;
340 }
341 /* check that ring's MemoryRegion wasn't replaced */
342 hva_ring_offset = ring_gpa - reg_gpa;
343 if (ring_hva != reg_hva + hva_ring_offset) {
344 return -EBUSY;
345 }
346
347 return 0;
348 }
349
350 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
351 void *reg_hva,
352 uint64_t reg_gpa,
353 uint64_t reg_size)
354 {
355 int i, j;
356 int r = 0;
357 const char *part_name[] = {
358 "descriptor table",
359 "available ring",
360 "used ring"
361 };
362
363 if (vhost_dev_has_iommu(dev)) {
364 return 0;
365 }
366
367 for (i = 0; i < dev->nvqs; ++i) {
368 struct vhost_virtqueue *vq = dev->vqs + i;
369
370 if (vq->desc_phys == 0) {
371 continue;
372 }
373
374 j = 0;
375 r = vhost_verify_ring_part_mapping(
376 vq->desc, vq->desc_phys, vq->desc_size,
377 reg_hva, reg_gpa, reg_size);
378 if (r) {
379 break;
380 }
381
382 j++;
383 r = vhost_verify_ring_part_mapping(
384 vq->avail, vq->avail_phys, vq->avail_size,
385 reg_hva, reg_gpa, reg_size);
386 if (r) {
387 break;
388 }
389
390 j++;
391 r = vhost_verify_ring_part_mapping(
392 vq->used, vq->used_phys, vq->used_size,
393 reg_hva, reg_gpa, reg_size);
394 if (r) {
395 break;
396 }
397 }
398
399 if (r == -ENOMEM) {
400 error_report("Unable to map %s for ring %d", part_name[j], i);
401 } else if (r == -EBUSY) {
402 error_report("%s relocated for ring %d", part_name[j], i);
403 }
404 return r;
405 }
406
407 /*
408 * vhost_section: identify sections needed for vhost access
409 *
410 * We only care about RAM sections here (where virtqueue and guest
411 * internals accessed by virtio might live). If we find one we still
412 * allow the backend to potentially filter it out of our list.
413 */
414 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
415 {
416 MemoryRegion *mr = section->mr;
417
418 if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
419 uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
420 uint8_t handled_dirty;
421
422 /*
423 * Kernel-based vhost doesn't handle any block that is doing
424 * dirty tracking other than migration, for which it has
425 * specific logging support. For TCG the kernel never gets
426 * involved anyway, so we can also ignore its self-modifying
427 * code detection flags. However, a vhost-user client could
428 * still confuse a TCG guest if it rewrites executable memory
429 * that has already been translated.
430 */
431 handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
432 (1 << DIRTY_MEMORY_CODE);
433
434 if (dirty_mask & ~handled_dirty) {
435 trace_vhost_reject_section(mr->name, 1);
436 return false;
437 }
438
439 if (dev->vhost_ops->vhost_backend_mem_section_filter &&
440 !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
441 trace_vhost_reject_section(mr->name, 2);
442 return false;
443 }
444
445 trace_vhost_section(mr->name);
446 return true;
447 } else {
448 trace_vhost_reject_section(mr->name, 3);
449 return false;
450 }
451 }
452
453 static void vhost_begin(MemoryListener *listener)
454 {
455 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
456 memory_listener);
457 dev->tmp_sections = NULL;
458 dev->n_tmp_sections = 0;
459 }
460
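/*
 * Called by the memory listener once a batch of region updates is
 * complete.  Compare the freshly collected tmp_sections against the
 * previous list; if anything changed, rebuild the vhost_memory region
 * table, re-verify that any started rings are still mapped, grow the
 * dirty log if necessary and push the new table to the backend.  The
 * old sections are unreferenced only after the backend has switched
 * to the new table.
 */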
461 static void vhost_commit(MemoryListener *listener)
462 {
463 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
464 memory_listener);
465 MemoryRegionSection *old_sections;
466 int n_old_sections;
467 uint64_t log_size;
468 size_t regions_size;
469 int r;
470 int i;
471 bool changed = false;
472
473 /* Note we can be called before the device is started, but then
474 * starting the device calls set_mem_table, so we need to have
475 * built the data structures.
476 */
477 old_sections = dev->mem_sections;
478 n_old_sections = dev->n_mem_sections;
479 dev->mem_sections = dev->tmp_sections;
480 dev->n_mem_sections = dev->n_tmp_sections;
481
482 if (dev->n_mem_sections != n_old_sections) {
483 changed = true;
484 } else {
485 /* Same size, let's check the contents */
486 for (int i = 0; i < n_old_sections; i++) {
487 if (!MemoryRegionSection_eq(&old_sections[i],
488 &dev->mem_sections[i])) {
489 changed = true;
490 break;
491 }
492 }
493 }
494
495 trace_vhost_commit(dev->started, changed);
496 if (!changed) {
497 goto out;
498 }
499
500 /* Rebuild the regions list from the new sections list */
501 regions_size = offsetof(struct vhost_memory, regions) +
502 dev->n_mem_sections * sizeof dev->mem->regions[0];
503 dev->mem = g_realloc(dev->mem, regions_size);
504 dev->mem->nregions = dev->n_mem_sections;
505 used_memslots = dev->mem->nregions;
506 for (i = 0; i < dev->n_mem_sections; i++) {
507 struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
508 struct MemoryRegionSection *mrs = dev->mem_sections + i;
509
510 cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
511 cur_vmr->memory_size = int128_get64(mrs->size);
512 cur_vmr->userspace_addr =
513 (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
514 mrs->offset_within_region;
515 cur_vmr->flags_padding = 0;
516 }
517
518 if (!dev->started) {
519 goto out;
520 }
521
522 for (i = 0; i < dev->mem->nregions; i++) {
523 if (vhost_verify_ring_mappings(dev,
524 (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
525 dev->mem->regions[i].guest_phys_addr,
526 dev->mem->regions[i].memory_size)) {
527 error_report("Verify ring failure on region %d", i);
528 abort();
529 }
530 }
531
532 if (!dev->log_enabled) {
533 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
534 if (r < 0) {
535 VHOST_OPS_DEBUG("vhost_set_mem_table failed");
536 }
537 goto out;
538 }
539 log_size = vhost_get_log_size(dev);
540 /* We allocate an extra 4K bytes of log,
541 * to reduce the number of reallocations. */
542 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
543 /* To log more, must increase log size before table update. */
544 if (dev->log_size < log_size) {
545 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
546 }
547 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
548 if (r < 0) {
549 VHOST_OPS_DEBUG("vhost_set_mem_table failed");
550 }
551 /* To log less, can only decrease log size after table update. */
552 if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
553 vhost_dev_log_resize(dev, log_size);
554 }
555
556 out:
557 /* Unref the old list of sections; this must happen _after_ the
558 * vhost_set_mem_table call, to ensure the client isn't still using
559 * the sections we're about to unref.
560 */
561 while (n_old_sections--) {
562 memory_region_unref(old_sections[n_old_sections].mr);
563 }
564 g_free(old_sections);
565 return;
566 }
567
568 /* Adds the section data to the tmp_section structure.
569 * It relies on the listener calling us in memory address order,
570 * once for each region (via the _add and _nop methods), so that
571 * it can join neighbouring sections.
572 */
573 static void vhost_region_add_section(struct vhost_dev *dev,
574 MemoryRegionSection *section)
575 {
576 bool need_add = true;
577 uint64_t mrs_size = int128_get64(section->size);
578 uint64_t mrs_gpa = section->offset_within_address_space;
579 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
580 section->offset_within_region;
581 RAMBlock *mrs_rb = section->mr->ram_block;
582
583 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
584 mrs_host);
585
586 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
587 /* Round the section to its page size */
588 /* First align the start down to a page boundary */
589 size_t mrs_page = qemu_ram_pagesize(mrs_rb);
590 uint64_t alignage = mrs_host & (mrs_page - 1);
591 if (alignage) {
592 mrs_host -= alignage;
593 mrs_size += alignage;
594 mrs_gpa -= alignage;
595 }
596 /* Now align the size up to a page boundary */
597 alignage = mrs_size & (mrs_page - 1);
598 if (alignage) {
599 mrs_size += mrs_page - alignage;
600 }
601 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
602 mrs_size, mrs_host);
603 }
604
605 if (dev->n_tmp_sections) {
606 /* Since we already have at least one section, let's see if
607 * this extends it; since we're scanning in order, we only
608 * have to look at the last one, and the FlatView that calls
609 * us shouldn't have overlaps.
610 */
611 MemoryRegionSection *prev_sec = dev->tmp_sections +
612 (dev->n_tmp_sections - 1);
613 uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
614 uint64_t prev_size = int128_get64(prev_sec->size);
615 uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
616 uint64_t prev_host_start =
617 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
618 prev_sec->offset_within_region;
619 uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);
620
621 if (mrs_gpa <= (prev_gpa_end + 1)) {
622 /* OK, looks like overlapping/intersecting - it's possible that
623 * the rounding to page sizes has made them overlap, but they should
624 * match up in the same RAMBlock if they do.
625 */
626 if (mrs_gpa < prev_gpa_start) {
627 error_report("%s:Section '%s' rounded to %"PRIx64
628 " prior to previous '%s' %"PRIx64,
629 __func__, section->mr->name, mrs_gpa,
630 prev_sec->mr->name, prev_gpa_start);
631 /* A way to cleanly fail here would be better */
632 return;
633 }
634 /* Offset from the start of the previous GPA to this GPA */
635 size_t offset = mrs_gpa - prev_gpa_start;
636
637 if (prev_host_start + offset == mrs_host &&
638 section->mr == prev_sec->mr &&
639 (!dev->vhost_ops->vhost_backend_can_merge ||
640 dev->vhost_ops->vhost_backend_can_merge(dev,
641 mrs_host, mrs_size,
642 prev_host_start, prev_size))) {
643 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
644 need_add = false;
645 prev_sec->offset_within_address_space =
646 MIN(prev_gpa_start, mrs_gpa);
647 prev_sec->offset_within_region =
648 MIN(prev_host_start, mrs_host) -
649 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
650 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
651 mrs_host));
652 trace_vhost_region_add_section_merge(section->mr->name,
653 int128_get64(prev_sec->size),
654 prev_sec->offset_within_address_space,
655 prev_sec->offset_within_region);
656 } else {
657 /* adjoining regions are fine, but overlapping ones with
658 * different blocks/offsets shouldn't happen
659 */
660 if (mrs_gpa != prev_gpa_end + 1) {
661 error_report("%s: Overlapping but not coherent sections "
662 "at %"PRIx64,
663 __func__, mrs_gpa);
664 return;
665 }
666 }
667 }
668 }
669
670 if (need_add) {
671 ++dev->n_tmp_sections;
672 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
673 dev->n_tmp_sections);
674 dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
675 /* The flatview isn't stable and we don't use it; making it NULL
676 * means we can memcmp the list.
677 */
678 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
679 memory_region_ref(section->mr);
680 }
681 }
682
683 /* Used for both add and nop callbacks */
684 static void vhost_region_addnop(MemoryListener *listener,
685 MemoryRegionSection *section)
686 {
687 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
688 memory_listener);
689
690 if (!vhost_section(dev, section)) {
691 return;
692 }
693 vhost_region_add_section(dev, section);
694 }
695
696 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
697 {
698 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
699 struct vhost_dev *hdev = iommu->hdev;
700 hwaddr iova = iotlb->iova + iommu->iommu_offset;
701
702 if (vhost_backend_invalidate_device_iotlb(hdev, iova,
703 iotlb->addr_mask + 1)) {
704 error_report("Failed to invalidate device iotlb");
705 }
706 }
707
708 static void vhost_iommu_region_add(MemoryListener *listener,
709 MemoryRegionSection *section)
710 {
711 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
712 iommu_listener);
713 struct vhost_iommu *iommu;
714 Int128 end;
715 int iommu_idx, ret;
716 IOMMUMemoryRegion *iommu_mr;
717 Error *err = NULL;
718
719 if (!memory_region_is_iommu(section->mr)) {
720 return;
721 }
722
723 iommu_mr = IOMMU_MEMORY_REGION(section->mr);
724
725 iommu = g_malloc0(sizeof(*iommu));
726 end = int128_add(int128_make64(section->offset_within_region),
727 section->size);
728 end = int128_sub(end, int128_one());
729 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
730 MEMTXATTRS_UNSPECIFIED);
731 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
732 IOMMU_NOTIFIER_UNMAP,
733 section->offset_within_region,
734 int128_get64(end),
735 iommu_idx);
736 iommu->mr = section->mr;
737 iommu->iommu_offset = section->offset_within_address_space -
738 section->offset_within_region;
739 iommu->hdev = dev;
740 ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, &err);
741 if (ret) {
742 error_report_err(err);
743 exit(1);
744 }
745 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
746 /* TODO: can replay help performance here? */
747 }
748
749 static void vhost_iommu_region_del(MemoryListener *listener,
750 MemoryRegionSection *section)
751 {
752 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
753 iommu_listener);
754 struct vhost_iommu *iommu;
755
756 if (!memory_region_is_iommu(section->mr)) {
757 return;
758 }
759
760 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
761 if (iommu->mr == section->mr &&
762 iommu->n.start == section->offset_within_region) {
763 memory_region_unregister_iommu_notifier(iommu->mr,
764 &iommu->n);
765 QLIST_REMOVE(iommu, iommu_next);
766 g_free(iommu);
767 break;
768 }
769 }
770 }
771
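/*
 * Program the backend with one ring's addresses: the (userspace)
 * addresses of the descriptor table, available ring and used ring,
 * plus the guest-physical address used for dirty logging of used-ring
 * writes.  With enable_log set, VHOST_VRING_F_LOG asks the backend to
 * log writes made through this ring.
 */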
772 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
773 struct vhost_virtqueue *vq,
774 unsigned idx, bool enable_log)
775 {
776 struct vhost_vring_addr addr;
777 int r;
778 memset(&addr, 0, sizeof(struct vhost_vring_addr));
779
780 if (dev->vhost_ops->vhost_vq_get_addr) {
781 r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
782 if (r < 0) {
783 VHOST_OPS_DEBUG("vhost_vq_get_addr failed");
784 return -errno;
785 }
786 } else {
787 addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
788 addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
789 addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
790 }
791 addr.index = idx;
792 addr.log_guest_addr = vq->used_phys;
793 addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
794 r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
795 if (r < 0) {
796 VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
797 return -errno;
798 }
799 return 0;
800 }
801
802 static int vhost_dev_set_features(struct vhost_dev *dev,
803 bool enable_log)
804 {
805 uint64_t features = dev->acked_features;
806 int r;
807 if (enable_log) {
808 features |= 0x1ULL << VHOST_F_LOG_ALL;
809 }
810 if (!vhost_dev_has_iommu(dev)) {
811 features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
812 }
813 if (dev->vhost_ops->vhost_force_iommu) {
814 if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
815 features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
816 }
817 }
818 r = dev->vhost_ops->vhost_set_features(dev, features);
819 if (r < 0) {
820 VHOST_OPS_DEBUG("vhost_set_features failed");
821 }
822 return r < 0 ? -errno : 0;
823 }
824
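/*
 * Toggle dirty logging on a running device: re-negotiate the feature
 * set with or without VHOST_F_LOG_ALL, then reprogram every ring's
 * addresses so the per-ring log flag matches.  On failure the rings
 * and features are rolled back to the previous logging state.
 */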
825 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
826 {
827 int r, i, idx;
828 r = vhost_dev_set_features(dev, enable_log);
829 if (r < 0) {
830 goto err_features;
831 }
832 for (i = 0; i < dev->nvqs; ++i) {
833 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
834 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
835 enable_log);
836 if (r < 0) {
837 goto err_vq;
838 }
839 }
840 return 0;
841 err_vq:
842 for (; i >= 0; --i) {
843 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
844 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
845 dev->log_enabled);
846 }
847 vhost_dev_set_features(dev, dev->log_enabled);
848 err_features:
849 return r;
850 }
851
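/*
 * Enable or disable dirty logging, typically in response to the
 * listener's log_global_start/stop callbacks when migration begins or
 * ends.  If the device is not started yet, only the desired state is
 * recorded; vhost_dev_start() applies it later.
 */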
852 static int vhost_migration_log(MemoryListener *listener, bool enable)
853 {
854 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
855 memory_listener);
856 int r;
857 if (enable == dev->log_enabled) {
858 return 0;
859 }
860 if (!dev->started) {
861 dev->log_enabled = enable;
862 return 0;
863 }
864 if (!enable) {
865 r = vhost_dev_set_log(dev, false);
866 if (r < 0) {
867 return r;
868 }
869 vhost_log_put(dev, false);
870 } else {
871 vhost_dev_log_resize(dev, vhost_get_log_size(dev));
872 r = vhost_dev_set_log(dev, true);
873 if (r < 0) {
874 return r;
875 }
876 }
877 dev->log_enabled = enable;
878 return 0;
879 }
880
881 static void vhost_log_global_start(MemoryListener *listener)
882 {
883 int r;
884
885 r = vhost_migration_log(listener, true);
886 if (r < 0) {
887 abort();
888 }
889 }
890
891 static void vhost_log_global_stop(MemoryListener *listener)
892 {
893 int r;
894
895 r = vhost_migration_log(listener, false);
896 if (r < 0) {
897 abort();
898 }
899 }
900
901 static void vhost_log_start(MemoryListener *listener,
902 MemoryRegionSection *section,
903 int old, int new)
904 {
905 /* FIXME: implement */
906 }
907
908 static void vhost_log_stop(MemoryListener *listener,
909 MemoryRegionSection *section,
910 int old, int new)
911 {
912 /* FIXME: implement */
913 }
914
915 /* The vhost driver natively knows how to handle the vrings of non
916 * cross-endian legacy devices and modern devices. Only legacy devices
917 * exposed to a bi-endian guest may require the vhost driver to use a
918 * specific endianness.
919 */
920 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
921 {
922 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
923 return false;
924 }
925 #ifdef HOST_WORDS_BIGENDIAN
926 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
927 #else
928 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
929 #endif
930 }
931
932 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
933 bool is_big_endian,
934 int vhost_vq_index)
935 {
936 struct vhost_vring_state s = {
937 .index = vhost_vq_index,
938 .num = is_big_endian
939 };
940
941 if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
942 return 0;
943 }
944
945 VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
946 if (errno == ENOTTY) {
947 error_report("vhost does not support cross-endian");
948 return -ENOSYS;
949 }
950
951 return -errno;
952 }
953
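/*
 * Translate a guest-physical address through the current vhost_memory
 * table.  On success *uaddr holds the corresponding userspace address
 * and *len the number of contiguous bytes left in the matching region.
 */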
954 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
955 uint64_t gpa, uint64_t *uaddr,
956 uint64_t *len)
957 {
958 int i;
959
960 for (i = 0; i < hdev->mem->nregions; i++) {
961 struct vhost_memory_region *reg = hdev->mem->regions + i;
962
963 if (gpa >= reg->guest_phys_addr &&
964 reg->guest_phys_addr + reg->memory_size > gpa) {
965 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
966 *len = reg->guest_phys_addr + reg->memory_size - gpa;
967 return 0;
968 }
969 }
970
971 return -EFAULT;
972 }
973
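/*
 * Handle an IOTLB miss reported by the backend: resolve the IOVA via
 * the virtio device's DMA address space, translate the resulting
 * guest-physical address to a userspace address, and push the entry
 * back to the backend as an IOTLB update.
 */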
974 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
975 {
976 IOMMUTLBEntry iotlb;
977 uint64_t uaddr, len;
978 int ret = -EFAULT;
979
980 RCU_READ_LOCK_GUARD();
981
982 trace_vhost_iotlb_miss(dev, 1);
983
984 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
985 iova, write,
986 MEMTXATTRS_UNSPECIFIED);
987 if (iotlb.target_as != NULL) {
988 ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
989 &uaddr, &len);
990 if (ret) {
991 trace_vhost_iotlb_miss(dev, 3);
992 error_report("Failed to look up the translated address "
993 "%"PRIx64, iotlb.translated_addr);
994 goto out;
995 }
996
997 len = MIN(iotlb.addr_mask + 1, len);
998 iova = iova & ~iotlb.addr_mask;
999
1000 ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
1001 len, iotlb.perm);
1002 if (ret) {
1003 trace_vhost_iotlb_miss(dev, 4);
1004 error_report("Failed to update device iotlb");
1005 goto out;
1006 }
1007 }
1008
1009 trace_vhost_iotlb_miss(dev, 2);
1010
1011 out:
1012 return ret;
1013 }
1014
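/*
 * Hand one virtqueue over to the backend: program the ring size and
 * last_avail_idx, map the descriptor table, available ring and used
 * ring (or pass IOVAs straight through when an IOMMU is in use), set
 * the ring addresses, and wire up the kick eventfd.  If guest
 * notifiers are in use but no interrupt vector is assigned to the
 * queue, the call eventfd is disabled.
 */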
1015 static int vhost_virtqueue_start(struct vhost_dev *dev,
1016 struct VirtIODevice *vdev,
1017 struct vhost_virtqueue *vq,
1018 unsigned idx)
1019 {
1020 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1021 VirtioBusState *vbus = VIRTIO_BUS(qbus);
1022 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1023 hwaddr s, l, a;
1024 int r;
1025 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1026 struct vhost_vring_file file = {
1027 .index = vhost_vq_index
1028 };
1029 struct vhost_vring_state state = {
1030 .index = vhost_vq_index
1031 };
1032 struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
1033
1034 a = virtio_queue_get_desc_addr(vdev, idx);
1035 if (a == 0) {
1036 /* Queue might not be ready for start */
1037 return 0;
1038 }
1039
1040 vq->num = state.num = virtio_queue_get_num(vdev, idx);
1041 r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1042 if (r) {
1043 VHOST_OPS_DEBUG("vhost_set_vring_num failed");
1044 return -errno;
1045 }
1046
1047 state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1048 r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1049 if (r) {
1050 VHOST_OPS_DEBUG("vhost_set_vring_base failed");
1051 return -errno;
1052 }
1053
1054 if (vhost_needs_vring_endian(vdev)) {
1055 r = vhost_virtqueue_set_vring_endian_legacy(dev,
1056 virtio_is_big_endian(vdev),
1057 vhost_vq_index);
1058 if (r) {
1059 return -errno;
1060 }
1061 }
1062
1063 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1064 vq->desc_phys = a;
1065 vq->desc = vhost_memory_map(dev, a, &l, false);
1066 if (!vq->desc || l != s) {
1067 r = -ENOMEM;
1068 goto fail_alloc_desc;
1069 }
1070 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1071 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1072 vq->avail = vhost_memory_map(dev, a, &l, false);
1073 if (!vq->avail || l != s) {
1074 r = -ENOMEM;
1075 goto fail_alloc_avail;
1076 }
1077 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1078 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1079 vq->used = vhost_memory_map(dev, a, &l, true);
1080 if (!vq->used || l != s) {
1081 r = -ENOMEM;
1082 goto fail_alloc_used;
1083 }
1084
1085 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1086 if (r < 0) {
1087 r = -errno;
1088 goto fail_alloc;
1089 }
1090
1091 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1092 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1093 if (r) {
1094 VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1095 r = -errno;
1096 goto fail_kick;
1097 }
1098
1099 /* Clear and discard previous events if any. */
1100 event_notifier_test_and_clear(&vq->masked_notifier);
1101
1102 /* Init vring in unmasked state, unless guest_notifier_mask
1103 * will do it later.
1104 */
1105 if (!vdev->use_guest_notifier_mask) {
1106 /* TODO: check and handle errors. */
1107 vhost_virtqueue_mask(dev, vdev, idx, false);
1108 }
1109
1110 if (k->query_guest_notifiers &&
1111 k->query_guest_notifiers(qbus->parent) &&
1112 virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1113 file.fd = -1;
1114 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1115 if (r) {
1116 goto fail_vector;
1117 }
1118 }
1119
1120 return 0;
1121
1122 fail_vector:
1123 fail_kick:
1124 fail_alloc:
1125 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1126 0, 0);
1127 fail_alloc_used:
1128 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1129 0, 0);
1130 fail_alloc_avail:
1131 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1132 0, 0);
1133 fail_alloc_desc:
1134 return r;
1135 }
1136
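/*
 * Take one virtqueue back from the backend: read back last_avail_idx
 * so the virtio core stays consistent (falling back to the used index
 * if the backend is unreachable), restore native vring endianness for
 * legacy cross-endian setups, and unmap the ring memory.
 */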
1137 static void vhost_virtqueue_stop(struct vhost_dev *dev,
1138 struct VirtIODevice *vdev,
1139 struct vhost_virtqueue *vq,
1140 unsigned idx)
1141 {
1142 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1143 struct vhost_vring_state state = {
1144 .index = vhost_vq_index,
1145 };
1146 int r;
1147
1148 if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1149 /* Don't stop the virtqueue which might not have been started */
1150 return;
1151 }
1152
1153 r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1154 if (r < 0) {
1155 VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
1156 /* Connection to the backend is broken, so let's sync internal
1157 * last avail idx to the device used idx.
1158 */
1159 virtio_queue_restore_last_avail_idx(vdev, idx);
1160 } else {
1161 virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1162 }
1163 virtio_queue_invalidate_signalled_used(vdev, idx);
1164 virtio_queue_update_used_idx(vdev, idx);
1165
1166 /* In the cross-endian case, we need to reset the vring endianness
1167 * to native, as legacy devices expect it by default.
1168 */
1169 if (vhost_needs_vring_endian(vdev)) {
1170 vhost_virtqueue_set_vring_endian_legacy(dev,
1171 !virtio_is_big_endian(vdev),
1172 vhost_vq_index);
1173 }
1174
1175 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1176 1, virtio_queue_get_used_size(vdev, idx));
1177 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1178 0, virtio_queue_get_avail_size(vdev, idx));
1179 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1180 0, virtio_queue_get_desc_size(vdev, idx));
1181 }
1182
1183 static void vhost_eventfd_add(MemoryListener *listener,
1184 MemoryRegionSection *section,
1185 bool match_data, uint64_t data, EventNotifier *e)
1186 {
1187 }
1188
1189 static void vhost_eventfd_del(MemoryListener *listener,
1190 MemoryRegionSection *section,
1191 bool match_data, uint64_t data, EventNotifier *e)
1192 {
1193 }
1194
1195 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1196 int n, uint32_t timeout)
1197 {
1198 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1199 struct vhost_vring_state state = {
1200 .index = vhost_vq_index,
1201 .num = timeout,
1202 };
1203 int r;
1204
1205 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1206 return -EINVAL;
1207 }
1208
1209 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1210 if (r) {
1211 VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1212 return r;
1213 }
1214
1215 return 0;
1216 }
1217
1218 static int vhost_virtqueue_init(struct vhost_dev *dev,
1219 struct vhost_virtqueue *vq, int n)
1220 {
1221 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1222 struct vhost_vring_file file = {
1223 .index = vhost_vq_index,
1224 };
1225 int r = event_notifier_init(&vq->masked_notifier, 0);
1226 if (r < 0) {
1227 return r;
1228 }
1229
1230 file.fd = event_notifier_get_fd(&vq->masked_notifier);
1231 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1232 if (r) {
1233 VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1234 r = -errno;
1235 goto fail_call;
1236 }
1237
1238 vq->dev = dev;
1239
1240 return 0;
1241 fail_call:
1242 event_notifier_cleanup(&vq->masked_notifier);
1243 return r;
1244 }
1245
1246 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1247 {
1248 event_notifier_cleanup(&vq->masked_notifier);
1249 }
1250
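/*
 * One-time initialisation of a vhost device: select the backend type,
 * initialise the backend, take ownership, fetch its feature set,
 * initialise each virtqueue's masked notifier and call eventfd, apply
 * an optional busy-loop timeout, register the memory listener and add
 * a migration blocker when dirty logging cannot be provided.
 *
 * Typical caller sequence (a sketch, not specific to any one device):
 * vhost_dev_init() -> vhost_dev_enable_notifiers() -> vhost_dev_start(),
 * torn down in reverse with vhost_dev_stop(),
 * vhost_dev_disable_notifiers() and vhost_dev_cleanup().
 */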
1251 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1252 VhostBackendType backend_type, uint32_t busyloop_timeout)
1253 {
1254 uint64_t features;
1255 int i, r, n_initialized_vqs = 0;
1256 Error *local_err = NULL;
1257
1258 hdev->vdev = NULL;
1259 hdev->migration_blocker = NULL;
1260
1261 r = vhost_set_backend_type(hdev, backend_type);
1262 assert(r >= 0);
1263
1264 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1265 if (r < 0) {
1266 goto fail;
1267 }
1268
1269 r = hdev->vhost_ops->vhost_set_owner(hdev);
1270 if (r < 0) {
1271 VHOST_OPS_DEBUG("vhost_set_owner failed");
1272 goto fail;
1273 }
1274
1275 r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1276 if (r < 0) {
1277 VHOST_OPS_DEBUG("vhost_get_features failed");
1278 goto fail;
1279 }
1280
1281 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1282 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1283 if (r < 0) {
1284 goto fail;
1285 }
1286 }
1287
1288 if (busyloop_timeout) {
1289 for (i = 0; i < hdev->nvqs; ++i) {
1290 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1291 busyloop_timeout);
1292 if (r < 0) {
1293 goto fail_busyloop;
1294 }
1295 }
1296 }
1297
1298 hdev->features = features;
1299
1300 hdev->memory_listener = (MemoryListener) {
1301 .begin = vhost_begin,
1302 .commit = vhost_commit,
1303 .region_add = vhost_region_addnop,
1304 .region_nop = vhost_region_addnop,
1305 .log_start = vhost_log_start,
1306 .log_stop = vhost_log_stop,
1307 .log_sync = vhost_log_sync,
1308 .log_global_start = vhost_log_global_start,
1309 .log_global_stop = vhost_log_global_stop,
1310 .eventfd_add = vhost_eventfd_add,
1311 .eventfd_del = vhost_eventfd_del,
1312 .priority = 10
1313 };
1314
1315 hdev->iommu_listener = (MemoryListener) {
1316 .region_add = vhost_iommu_region_add,
1317 .region_del = vhost_iommu_region_del,
1318 };
1319
1320 if (hdev->migration_blocker == NULL) {
1321 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1322 error_setg(&hdev->migration_blocker,
1323 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1324 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1325 error_setg(&hdev->migration_blocker,
1326 "Migration disabled: failed to allocate shared memory");
1327 }
1328 }
1329
1330 if (hdev->migration_blocker != NULL) {
1331 r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1332 if (local_err) {
1333 error_report_err(local_err);
1334 error_free(hdev->migration_blocker);
1335 goto fail_busyloop;
1336 }
1337 }
1338
1339 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1340 hdev->n_mem_sections = 0;
1341 hdev->mem_sections = NULL;
1342 hdev->log = NULL;
1343 hdev->log_size = 0;
1344 hdev->log_enabled = false;
1345 hdev->started = false;
1346 memory_listener_register(&hdev->memory_listener, &address_space_memory);
1347 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1348
1349 if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1350 error_report("vhost backend memory slots limit is less"
1351 " than current number of present memory slots");
1352 r = -1;
1353 if (busyloop_timeout) {
1354 goto fail_busyloop;
1355 } else {
1356 goto fail;
1357 }
1358 }
1359
1360 return 0;
1361
1362 fail_busyloop:
1363 while (--i >= 0) {
1364 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1365 }
1366 fail:
1367 hdev->nvqs = n_initialized_vqs;
1368 vhost_dev_cleanup(hdev);
1369 return r;
1370 }
1371
1372 void vhost_dev_cleanup(struct vhost_dev *hdev)
1373 {
1374 int i;
1375
1376 for (i = 0; i < hdev->nvqs; ++i) {
1377 vhost_virtqueue_cleanup(hdev->vqs + i);
1378 }
1379 if (hdev->mem) {
1380 /* those are only safe after successful init */
1381 memory_listener_unregister(&hdev->memory_listener);
1382 QLIST_REMOVE(hdev, entry);
1383 }
1384 if (hdev->migration_blocker) {
1385 migrate_del_blocker(hdev->migration_blocker);
1386 error_free(hdev->migration_blocker);
1387 }
1388 g_free(hdev->mem);
1389 g_free(hdev->mem_sections);
1390 if (hdev->vhost_ops) {
1391 hdev->vhost_ops->vhost_backend_cleanup(hdev);
1392 }
1393 assert(!hdev->log);
1394
1395 memset(hdev, 0, sizeof(struct vhost_dev));
1396 }
1397
1398 /* Stop processing guest IO notifications in qemu.
1399 * Start processing them in vhost in kernel.
1400 */
1401 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1402 {
1403 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1404 int i, r, e;
1405
1406 /* We will pass the notifiers to the kernel; make sure that QEMU
1407 * doesn't interfere.
1408 */
1409 r = virtio_device_grab_ioeventfd(vdev);
1410 if (r < 0) {
1411 error_report("binding does not support host notifiers");
1412 goto fail;
1413 }
1414
1415 for (i = 0; i < hdev->nvqs; ++i) {
1416 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1417 true);
1418 if (r < 0) {
1419 error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1420 goto fail_vq;
1421 }
1422 }
1423
1424 return 0;
1425 fail_vq:
1426 while (--i >= 0) {
1427 e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1428 false);
1429 if (e < 0) {
1430 error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1431 }
1432 assert (e >= 0);
1433 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1434 }
1435 virtio_device_release_ioeventfd(vdev);
1436 fail:
1437 return r;
1438 }
1439
1440 /* Stop processing guest IO notifications in vhost.
1441 * Start processing them in qemu.
1442 * This might actually run the qemu handlers right away,
1443 * so virtio in qemu must be completely setup when this is called.
1444 */
1445 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1446 {
1447 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1448 int i, r;
1449
1450 for (i = 0; i < hdev->nvqs; ++i) {
1451 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1452 false);
1453 if (r < 0) {
1454 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1455 }
1456 assert (r >= 0);
1457 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1458 }
1459 virtio_device_release_ioeventfd(vdev);
1460 }
1461
1462 /* Test and clear event pending status.
1463 * Should be called after unmask to avoid losing events.
1464 */
1465 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1466 {
1467 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1468 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1469 return event_notifier_test_and_clear(&vq->masked_notifier);
1470 }
1471
1472 /* Mask/unmask events from this vq. */
1473 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1474 bool mask)
1475 {
1476 struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1477 int r, index = n - hdev->vq_index;
1478 struct vhost_vring_file file;
1479
1480 /* should only be called after backend is connected */
1481 assert(hdev->vhost_ops);
1482
1483 if (mask) {
1484 assert(vdev->use_guest_notifier_mask);
1485 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1486 } else {
1487 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1488 }
1489
1490 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1491 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1492 if (r < 0) {
1493 VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1494 }
1495 }
1496
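/*
 * Clear from 'features' every bit listed in feature_bits that the
 * backend did not advertise, so the device only offers what vhost can
 * handle.  vhost_ack_features() below does the reverse bookkeeping:
 * it records the guest-acked bits the backend cares about.
 */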
1497 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1498 uint64_t features)
1499 {
1500 const int *bit = feature_bits;
1501 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1502 uint64_t bit_mask = (1ULL << *bit);
1503 if (!(hdev->features & bit_mask)) {
1504 features &= ~bit_mask;
1505 }
1506 bit++;
1507 }
1508 return features;
1509 }
1510
1511 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1512 uint64_t features)
1513 {
1514 const int *bit = feature_bits;
1515 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1516 uint64_t bit_mask = (1ULL << *bit);
1517 if (features & bit_mask) {
1518 hdev->acked_features |= bit_mask;
1519 }
1520 bit++;
1521 }
1522 }
1523
1524 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1525 uint32_t config_len)
1526 {
1527 assert(hdev->vhost_ops);
1528
1529 if (hdev->vhost_ops->vhost_get_config) {
1530 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1531 }
1532
1533 return -1;
1534 }
1535
1536 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1537 uint32_t offset, uint32_t size, uint32_t flags)
1538 {
1539 assert(hdev->vhost_ops);
1540
1541 if (hdev->vhost_ops->vhost_set_config) {
1542 return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1543 size, flags);
1544 }
1545
1546 return -1;
1547 }
1548
1549 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1550 const VhostDevConfigOps *ops)
1551 {
1552 hdev->config_ops = ops;
1553 }
1554
1555 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1556 {
1557 if (inflight && inflight->addr) {
1558 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1559 inflight->addr = NULL;
1560 inflight->fd = -1;
1561 }
1562 }
1563
1564 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1565 uint64_t new_size)
1566 {
1567 Error *err = NULL;
1568 int fd = -1;
1569 void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1570 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1571 &fd, &err);
1572
1573 if (err) {
1574 error_report_err(err);
1575 return -1;
1576 }
1577
1578 vhost_dev_free_inflight(inflight);
1579 inflight->offset = 0;
1580 inflight->addr = addr;
1581 inflight->fd = fd;
1582 inflight->size = new_size;
1583
1584 return 0;
1585 }
1586
1587 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1588 {
1589 if (inflight->addr) {
1590 qemu_put_be64(f, inflight->size);
1591 qemu_put_be16(f, inflight->queue_size);
1592 qemu_put_buffer(f, inflight->addr, inflight->size);
1593 } else {
1594 qemu_put_be64(f, 0);
1595 }
1596 }
1597
1598 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1599 {
1600 uint64_t size;
1601
1602 size = qemu_get_be64(f);
1603 if (!size) {
1604 return 0;
1605 }
1606
1607 if (inflight->size != size) {
1608 if (vhost_dev_resize_inflight(inflight, size)) {
1609 return -1;
1610 }
1611 }
1612 inflight->queue_size = qemu_get_be16(f);
1613
1614 qemu_get_buffer(f, inflight->addr, size);
1615
1616 return 0;
1617 }
1618
1619 int vhost_dev_set_inflight(struct vhost_dev *dev,
1620 struct vhost_inflight *inflight)
1621 {
1622 int r;
1623
1624 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1625 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1626 if (r) {
1627 VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
1628 return -errno;
1629 }
1630 }
1631
1632 return 0;
1633 }
1634
1635 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1636 struct vhost_inflight *inflight)
1637 {
1638 int r;
1639
1640 if (dev->vhost_ops->vhost_get_inflight_fd) {
1641 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1642 if (r) {
1643 VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
1644 return -errno;
1645 }
1646 }
1647
1648 return 0;
1649 }
1650
1651 /* Host notifiers must be enabled at this point. */
1652 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1653 {
1654 int i, r;
1655
1656 /* should only be called after backend is connected */
1657 assert(hdev->vhost_ops);
1658
1659 hdev->started = true;
1660 hdev->vdev = vdev;
1661
1662 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1663 if (r < 0) {
1664 goto fail_features;
1665 }
1666
1667 if (vhost_dev_has_iommu(hdev)) {
1668 memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1669 }
1670
1671 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1672 if (r < 0) {
1673 VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1674 r = -errno;
1675 goto fail_mem;
1676 }
1677 for (i = 0; i < hdev->nvqs; ++i) {
1678 r = vhost_virtqueue_start(hdev,
1679 vdev,
1680 hdev->vqs + i,
1681 hdev->vq_index + i);
1682 if (r < 0) {
1683 goto fail_vq;
1684 }
1685 }
1686
1687 if (hdev->log_enabled) {
1688 uint64_t log_base;
1689
1690 hdev->log_size = vhost_get_log_size(hdev);
1691 hdev->log = vhost_log_get(hdev->log_size,
1692 vhost_dev_log_is_shared(hdev));
1693 log_base = (uintptr_t)hdev->log->log;
1694 r = hdev->vhost_ops->vhost_set_log_base(hdev,
1695 hdev->log_size ? log_base : 0,
1696 hdev->log);
1697 if (r < 0) {
1698 VHOST_OPS_DEBUG("vhost_set_log_base failed");
1699 r = -errno;
1700 goto fail_log;
1701 }
1702 }
1703 if (hdev->vhost_ops->vhost_dev_start) {
1704 r = hdev->vhost_ops->vhost_dev_start(hdev, true);
1705 if (r) {
1706 goto fail_log;
1707 }
1708 }
1709 if (vhost_dev_has_iommu(hdev) &&
1710 hdev->vhost_ops->vhost_set_iotlb_callback) {
1711 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1712
1713 /* Update used ring information for IOTLB to work correctly;
1714 * the vhost-kernel code requires this. */
1715 for (i = 0; i < hdev->nvqs; ++i) {
1716 struct vhost_virtqueue *vq = hdev->vqs + i;
1717 vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1718 }
1719 }
1720 return 0;
1721 fail_log:
1722 vhost_log_put(hdev, false);
1723 fail_vq:
1724 while (--i >= 0) {
1725 vhost_virtqueue_stop(hdev,
1726 vdev,
1727 hdev->vqs + i,
1728 hdev->vq_index + i);
1729 }
1730
1731 fail_mem:
1732 fail_features:
1733
1734 hdev->started = false;
1735 return r;
1736 }
1737
1738 /* Host notifiers must be enabled at this point. */
1739 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1740 {
1741 int i;
1742
1743 /* should only be called after backend is connected */
1744 assert(hdev->vhost_ops);
1745
1746 if (hdev->vhost_ops->vhost_dev_start) {
1747 hdev->vhost_ops->vhost_dev_start(hdev, false);
1748 }
1749 for (i = 0; i < hdev->nvqs; ++i) {
1750 vhost_virtqueue_stop(hdev,
1751 vdev,
1752 hdev->vqs + i,
1753 hdev->vq_index + i);
1754 }
1755
1756 if (vhost_dev_has_iommu(hdev)) {
1757 if (hdev->vhost_ops->vhost_set_iotlb_callback) {
1758 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1759 }
1760 memory_listener_unregister(&hdev->iommu_listener);
1761 }
1762 vhost_log_put(hdev, true);
1763 hdev->started = false;
1764 hdev->vdev = NULL;
1765 }
1766
1767 int vhost_net_set_backend(struct vhost_dev *hdev,
1768 struct vhost_vring_file *file)
1769 {
1770 if (hdev->vhost_ops->vhost_net_set_backend) {
1771 return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1772 }
1773
1774 return -1;
1775 }