qemu.git: hw/virtio/vhost-vdpa.c
/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

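/*
 * Return true if the memory listener should not map @section through
 * vhost-vdpa: sections that are neither RAM nor an IOMMU region, protected
 * memory, and RAM device (MMIO) regions cannot be mapped, and sections that
 * fall outside the device's usable IOVA range [iova_min, iova_max] are
 * rejected with an error.
 */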
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}

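/*
 * Map [iova, iova + size) to the host virtual address @vaddr by writing a
 * VHOST_IOTLB_UPDATE message (vhost_msg_v2) to the vhost-vdpa device fd.
 * Returns 0 on success and -EIO if the write fails.
 */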
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

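/*
 * Remove the mapping for [iova, iova + size) by writing a
 * VHOST_IOTLB_INVALIDATE message to the vhost-vdpa device fd.
 */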
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

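/*
 * If the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, the memory listener
 * wraps the per-section map/unmap messages in a batch: a
 * VHOST_IOTLB_BATCH_BEGIN message is sent before the first update of a
 * transaction and a VHOST_IOTLB_BATCH_END message when the listener commits,
 * so the backend can apply all updates at once.
 */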
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

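/*
 * Close the current IOTLB batch, if one was opened, by sending a
 * VHOST_IOTLB_BATCH_END message and clearing iotlb_batch_begin_sent.
 */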
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

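/*
 * Map a newly added RAM section into the device: page-align the section,
 * translate its guest-physical range to the host virtual address backing it,
 * and issue a (possibly batched) VHOST_IOTLB_UPDATE for the whole range.
 */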
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost-vdpa: DMA map failed");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

231
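/*
 * Undo the mapping for a section that is being removed: send a
 * VHOST_IOTLB_INVALIDATE for the page-aligned range and drop the reference
 * taken on the memory region in region_add.
 */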
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost-vdpa: DMA unmap failed");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping, so we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

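/*
 * Thin wrapper around ioctl() on the vhost-vdpa device fd; a failing ioctl
 * is converted into a negative errno value.
 */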
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

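/*
 * OR @status into the device status byte: read the current status, set the
 * requested bits, write it back and read it again to verify that the device
 * accepted them. Returns -EIO if the bits did not stick.
 */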
static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}

static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
                              &v->iova_range);
    if (ret != 0) {
        v->iova_range.first = 0;
        v->iova_range.last = UINT64_MAX;
    }

    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
                                    v->iova_range.last);
}

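/*
 * Requests that apply to the whole vDPA device, rather than to a single
 * virtqueue group, only need to be issued once. They are sent through the
 * vhost_vdpa instance with index 0; for any other index this helper returns
 * true and the caller skips the request.
 */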
static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index != 0;
}

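/*
 * Backend init: disable RAM discard (all guest memory ends up pinned, as
 * with VFIO), wire up the memory listener and IOTLB message type, query the
 * device's usable IOVA range and, for the first vhost_vdpa only, move the
 * device status to ACKNOWLEDGE | DRIVER.
 */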
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);
    int ret;

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot disable discarding of RAM");
        return ret;
    }

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_get_iova_range(v);

    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

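/*
 * Map the device's per-queue doorbell page (at offset queue_index *
 * page_size of the vhost-vdpa fd) into QEMU and install it as the host
 * notifier memory region of the virtqueue, so guest kicks land directly on
 * the hardware doorbell. Returns -1 on failure, in which case the caller
 * tears down any notifiers that were already set up.
 */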
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    ram_block_discard_disable(false);

    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    int ret;

    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}

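/*
 * Negotiate backend (transport) features with the vhost-vdpa kernel driver.
 * Only VHOST_BACKEND_F_IOTLB_MSG_V2 and VHOST_BACKEND_F_IOTLB_BATCH are
 * kept; the result is cached in dev->backend_cap and consulted by the
 * memory listener when batching IOTLB updates.
 */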
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_one_time_request(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

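/*
 * Start/stop handling for one vhost_vdpa instance. On start, the per-queue
 * host notifiers are mapped and the virtqueues enabled; on stop, the
 * notifiers are torn down. Device-wide actions (registering the memory
 * listener and raising DRIVER_OK, or resetting the device on stop) are only
 * performed by the last instance of the group, i.e. when
 * dev->vq_index + dev->nvqs == dev->vq_index_end.
 */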
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        memory_listener_register(&v->listener, &address_space_memory);
        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};