/*
 * virtio-iommu device
 *
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/iov.h"
#include "qemu-common.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

#include "standard-headers/linux/virtio_ids.h"

#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci.h"

/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
#define VIOMMU_PROBE_SIZE 512

typedef struct VirtIOIOMMUDomain {
    uint32_t id;
    GTree *mappings;
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
} VirtIOIOMMUDomain;

typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;
    VirtIOIOMMUDomain *domain;
    IOMMUMemoryRegion *iommu_mr;
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;

static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}

/**
 * The bus number is used for lookup when SID based operations occur.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
 * numbers may not all be assigned yet.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}

static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    IOMMUPciBus *iommu_pci_bus;
    IOMMUDevice *dev;

    bus_n = PCI_BUS_NUM(sid);
    iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
    if (iommu_pci_bus) {
        devfn = sid & (PCI_DEVFN_MAX - 1);
        dev = iommu_pci_bus->pbdev[devfn];
        if (dev) {
            return &dev->iommu_mr;
        }
    }
    return NULL;
}

static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
    VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;

    if (inta->high < intb->low) {
        return -1;
    } else if (intb->high < inta->low) {
        return 1;
    } else {
        return 0;
    }
}
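
/*
 * interval_cmp() deliberately returns 0 for any two overlapping intervals,
 * not only for identical keys. Since domain->mappings uses it as its GTree
 * comparator, a lookup with a one-byte interval (as virtio_iommu_translate
 * does) finds whichever mapping contains that address: e.g. with a mapping
 * keyed [0x1000, 0x1fff], the key [0x1234, 0x1235] compares equal to it.
 */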

static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                    hwaddr virt_end, hwaddr paddr,
                                    uint32_t flags)
{
    IOMMUTLBEvent event;
    IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
                                              flags & VIRTIO_IOMMU_MAP_F_WRITE);

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) ||
        (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) {
        return;
    }

    trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
                                  paddr, perm);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.target_as = &address_space_memory;
    event.entry.addr_mask = virt_end - virt_start;
    event.entry.iova = virt_start;
    event.entry.perm = perm;
    event.entry.translated_addr = paddr;

    memory_region_notify_iommu(mr, 0, event);
}

static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                      hwaddr virt_end)
{
    IOMMUTLBEvent event;
    uint64_t delta = virt_end - virt_start;

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
        return;
    }

    trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;
    event.entry.addr_mask = delta;
    event.entry.iova = virt_start;

    if (delta == UINT64_MAX) {
        memory_region_notify_iommu(mr, 0, event);
    }

    while (virt_start != virt_end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);

        event.entry.addr_mask = mask;
        event.entry.iova = virt_start;
        memory_region_notify_iommu(mr, 0, event);
        virt_start += mask + 1;
    }
}
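
/*
 * Illustrative example of the chunking above: unmapping [0x1000, 0x4fff]
 * cannot be described by a single power-of-two addr_mask, so
 * dma_aligned_pow2_mask() splits it and the loop emits three UNMAP
 * notifications: [0x1000, 0x1fff], [0x2000, 0x3fff] and [0x4000, 0x4fff].
 * The special case before the loop covers the whole 64-bit range, for
 * which virt_end + 1 wraps around to virt_start and skips the loop.
 */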

static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
                                             gpointer data)
{
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_unmap(mr, interval->low, interval->high);

    return false;
}

static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
                                           gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);

    return false;
}

static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
    VirtIOIOMMUDomain *domain = ep->domain;

    if (!ep->domain) {
        return;
    }
    g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
                   ep->iommu_mr);
    QLIST_REMOVE(ep, next);
    ep->domain = NULL;
}

static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}

static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}

static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, (GDestroyNotify)g_free,
                                       (GDestroyNotify)g_free);
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}

static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}
291
292 static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
293 int devfn)
294 {
295 VirtIOIOMMU *s = opaque;
296 IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
297 static uint32_t mr_index;
298 IOMMUDevice *sdev;
299
300 if (!sbus) {
301 sbus = g_malloc0(sizeof(IOMMUPciBus) +
302 sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
303 sbus->bus = bus;
304 g_hash_table_insert(s->as_by_busptr, bus, sbus);
305 }
306
307 sdev = sbus->pbdev[devfn];
308 if (!sdev) {
309 char *name = g_strdup_printf("%s-%d-%d",
310 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
311 mr_index++, devfn);
312 sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice));
313
314 sdev->viommu = s;
315 sdev->bus = bus;
316 sdev->devfn = devfn;
317
318 trace_virtio_iommu_init_iommu_mr(name);
319
320 memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
321 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
322 OBJECT(s), name,
323 UINT64_MAX);
324 address_space_init(&sdev->as,
325 MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
326 g_free(name);
327 }
328 return &sdev->as;
329 }
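
/*
 * ATTACH request: bind an endpoint to a domain. An endpoint already
 * attached elsewhere is detached first, and a domain whose endpoint list
 * becomes empty is reclaimed. The new domain's existing mappings are then
 * replayed through the MAP notifiers of the endpoint's memory region.
 */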

static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_attach(domain_id, ep_id);

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * the device is already attached to a domain,
         * detach it first
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id);
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;

    /* Replay domain mappings on the associated memory region */
    g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
                   ep->iommu_mr);

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}
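
/*
 * MAP request: insert a [virt_start, virt_end] -> phys_start mapping into
 * the domain's interval tree. Because interval_cmp() treats overlapping
 * ranges as equal, the lookup below also rejects requests that merely
 * overlap an existing mapping with VIRTIO_IOMMU_S_INVAL.
 */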

static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;
    VirtIOIOMMUEndpoint *ep;

    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    g_tree_insert(domain->mappings, interval, mapping);

    QLIST_FOREACH(ep, &domain->endpoint_list, next) {
        virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start,
                                flags);
    }

    return VIRTIO_IOMMU_S_OK;
}
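
/*
 * UNMAP request: a mapping can only be removed as a whole, per the
 * virtio-iommu spec. Every mapping overlapping [virt_start, virt_end] is
 * either entirely covered (removed, with one UNMAP notification per
 * attached endpoint) or the request fails with VIRTIO_IOMMU_S_RANGE.
 */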

static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }
    interval.low = virt_start;
    interval.high = virt_end;

    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            QLIST_FOREACH(ep, &domain->endpoint_list, next) {
                virtio_iommu_notify_unmap(ep->iommu_mr, current_low,
                                          current_high);
            }
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}
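
/*
 * Serialize the reserved memory regions (e.g. an MSI doorbell window) as
 * VIRTIO_IOMMU_PROBE_T_RESV_MEM properties into the probe reply buffer,
 * one fixed-size property per region, or return -ENOSPC if they don't fit.
 */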

static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    int i;

    total = size * s->nb_reserved_regions;

    if (total > free) {
        return -ENOSPC;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        unsigned subtype = s->reserved_regions[i].type;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(s->reserved_regions[i].low);
        prop.end = cpu_to_le64(s->reserved_regions[i].high);

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}

/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    size_t free = VIOMMU_PROBE_SIZE;
    ssize_t count;

    if (!virtio_iommu_mr(s, ep_id)) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    buf += count;
    free -= count;

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_iov_to_req(struct iovec *iov,
                                   unsigned int iov_cnt,
                                   void *req, size_t req_sz)
{
    size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);

    sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
    if (unlikely(sz != payload_sz)) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    return 0;
}
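
/*
 * Handler boilerplate generator: for each request type this expands to a
 * function that copies the request payload (everything but the tail) out
 * of the iovec and dispatches it. E.g. virtio_iommu_handle_req(attach)
 * defines virtio_iommu_handle_attach(), which fills a
 * struct virtio_iommu_req_attach and calls virtio_iommu_attach().
 */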

#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)

static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}
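
/*
 * Request queue handler. Each element carries a device-readable buffer
 * (head plus type-specific payload) and a device-writable one: a bare tail
 * for most requests, or probe_size property bytes followed by the tail for
 * PROBE, hence the separately allocated reply buffer below.
 */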

static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    size_t output_size = sizeof(tail), sz;
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    void *buf = NULL;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        qemu_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_PROBE:
        {
            struct virtio_iommu_req_tail *ptail;

            output_size = s->config.probe_size + sizeof(tail);
            buf = g_malloc0(output_size);

            ptail = (struct virtio_iommu_req_tail *)
                        (buf + s->config.probe_size);
            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
            break;
        }
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_mutex_unlock(&s->mutex);

    out:
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          buf ? buf : &tail, output_size);
        assert(sz == output_size);

        virtqueue_push(vq, elem, sz);
        virtio_notify(vdev, vq);
        g_free(elem);
        g_free(buf);
        /*
         * Reset the reply buffer pointer and size: a PROBE request must not
         * leave a dangling buf / stale output_size for the next iteration.
         */
        buf = NULL;
        output_size = sizeof(tail);
    }
}
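
/*
 * Post a struct virtio_iommu_fault on the event queue. When the guest has
 * not queued any event buffer, the fault is dropped with a one-shot error
 * message rather than blocking the translation path.
 */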

static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
}
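
/*
 * IOMMU translate callback. The decision chain is:
 *  - unknown endpoint: bypass if VIRTIO_IOMMU_F_BYPASS was negotiated,
 *    otherwise report an UNKNOWN fault;
 *  - address within a reserved region: allow MSI doorbells, fault on the
 *    rest;
 *  - endpoint not attached to any domain: bypass or DOMAIN fault, as above;
 *  - otherwise look up the mapping and check the R/W permissions, returning
 *    IOMMU_NONE (and raising a MAPPING fault) on any miss or violation.
 */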

static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                            IOMMUAccessFlags flag,
                                            int iommu_idx)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMUInterval interval, *mapping_key;
    VirtIOIOMMUMapping *mapping_value;
    VirtIOIOMMU *s = sdev->viommu;
    bool read_fault, write_fault;
    VirtIOIOMMUEndpoint *ep;
    uint32_t sid, flags;
    bool bypass_allowed;
    bool found;
    int i;

    interval.low = addr;
    interval.high = addr + 1;

    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
        .perm = IOMMU_NONE,
    };

    bypass_allowed = virtio_vdev_has_feature(&s->parent_obj,
                                             VIRTIO_IOMMU_F_BYPASS);

    sid = virtio_iommu_get_bdf(sdev);

    trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
    qemu_mutex_lock(&s->mutex);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep) {
        if (!bypass_allowed) {
            error_report_once("%s sid=%d is not known!!", __func__, sid);
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        ReservedRegion *reg = &s->reserved_regions[i];

        if (addr >= reg->low && addr <= reg->high) {
            switch (reg->type) {
            case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                entry.perm = flag;
                break;
            case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
            default:
                virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                          VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                          sid, addr);
                break;
            }
            goto unlock;
        }
    }

    if (!ep->domain) {
        if (!bypass_allowed) {
            error_report_once("%s %02x:%02x.%01x not attached to any domain",
                              __func__, PCI_BUS_NUM(sid),
                              PCI_SLOT(sid), PCI_FUNC(sid));
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
                                   (void **)&mapping_key,
                                   (void **)&mapping_value);
    if (!found) {
        error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
                          __func__, addr, sid);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }

    read_fault = (flag & IOMMU_RO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
    write_fault = (flag & IOMMU_WO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);

    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
    if (flags) {
        error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
                          __func__, addr, flag, mapping_value->flags);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }
    entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
    entry.perm = flag;
    trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);

    unlock:
    qemu_mutex_unlock(&s->mutex);
    return entry;
}

static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    struct virtio_iommu_config *out_config = (void *)config_data;

    out_config->page_size_mask = cpu_to_le64(dev_config->page_size_mask);
    out_config->input_range.start = cpu_to_le64(dev_config->input_range.start);
    out_config->input_range.end = cpu_to_le64(dev_config->input_range.end);
    out_config->domain_range.start = cpu_to_le32(dev_config->domain_range.start);
    out_config->domain_range.end = cpu_to_le32(dev_config->domain_range.end);
    out_config->probe_size = cpu_to_le32(dev_config->probe_size);

    trace_virtio_iommu_get_config(dev_config->page_size_mask,
                                  dev_config->input_range.start,
                                  dev_config->input_range.end,
                                  dev_config->domain_range.start,
                                  dev_config->domain_range.end,
                                  dev_config->probe_size);
}

static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
                                          Error **errp)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);

    f |= dev->features;
    trace_virtio_iommu_get_features(f);
    return f;
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint ua = GPOINTER_TO_UINT(a);
    guint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high,
                             mapping->phys_addr);
    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);
    return false;
}

static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint32_t sid;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_mutex_lock(&s->mutex);

    if (!s->endpoints) {
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        goto unlock;
    }

    g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

    unlock:
    qemu_mutex_unlock(&s->mutex);
}

static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new,
                                            Error **errp)
{
    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "Virtio-iommu does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name);
    }
    return 0;
}

/*
 * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule,
 * for example 0xfffffffffffff000. When an assigned device has page size
 * restrictions due to the hardware IOMMU configuration, apply this restriction
 * to the mask.
 */
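/*
 * Worked example (illustrative numbers): with a 4KiB guest granule the
 * default mask is 0xfffffffffffff000 (ctz64() == 12). A device whose host
 * IOMMU only supports 64KiB pages, plugged before machine init completes,
 * narrows the config to new_mask = 0xffffffffffff0000. After
 * PHASE_MACHINE_READY the granule is frozen, so the same hotplug would be
 * rejected because the granules differ (ctz64() of 16 vs 12).
 */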
static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
                                           uint64_t new_mask,
                                           Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint64_t cur_mask = s->config.page_size_mask;

    trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask,
                                          new_mask);

    if ((cur_mask & new_mask) == 0) {
        error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                   " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask);
        return -1;
    }

    /*
     * After the machine is finalized, we can't change the mask anymore. If by
     * chance the hotplugged device supports the same granule, we can still
     * accept it. Having different masks is possible but the guest will use
     * sub-optimal block sizes, so warn about it.
     */
    if (phase_check(PHASE_MACHINE_READY)) {
        int new_granule = ctz64(new_mask);
        int cur_granule = ctz64(cur_mask);

        if (new_granule != cur_granule) {
            error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                       " is incompatible with mask 0x%"PRIx64, cur_mask,
                       new_mask);
            return -1;
        } else if (new_mask != cur_mask) {
            warn_report("virtio-iommu page mask 0x%"PRIx64
                        " does not match 0x%"PRIx64, cur_mask, new_mask);
        }
        return 0;
    }

    s->config.page_size_mask &= new_mask;
    return 0;
}

static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU,
                sizeof(struct virtio_iommu_config));

    memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));

    s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
                                 virtio_iommu_handle_command);
    s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

    s->config.page_size_mask = TARGET_PAGE_MASK;
    s->config.input_range.end = UINT64_MAX;
    s->config.domain_range.end = UINT32_MAX;
    s->config.probe_size = VIOMMU_PROBE_SIZE;

    virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
    virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
    virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);

    qemu_mutex_init(&s->mutex);

    s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s);
    } else {
        error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
    }
}

static void virtio_iommu_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    g_hash_table_destroy(s->as_by_busptr);
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }

    virtio_delete_queue(s->req_vq);
    virtio_delete_queue(s->event_vq);
    virtio_cleanup(vdev);
}

static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}

static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}

static void virtio_iommu_instance_init(Object *obj)
{
}

#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (VMStateField[]) {                       \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (VMStateField[]) {                      \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};

static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}

static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = domain_preload,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_END_OF_LIST()
    }
};

static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}

static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);
    return 0;
}
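
/*
 * Only the domain tree is migrated explicitly; endpoints are not
 * serialized on their own. On the destination, iommu_post_load() walks
 * each domain's endpoint list and rebuilds s->endpoints along with the
 * iommu_mr back-pointers.
 */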

static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .minimum_version_id = 1,
    .version_id = 1,
    .post_load = iommu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 1,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    vdc->vmsd = &vmstate_virtio_iommu_device;
}

static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
    imrc->replay = virtio_iommu_replay;
    imrc->notify_flag_changed = virtio_iommu_notify_flag_changed;
    imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask;
}

static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};

static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)