virtio: add Virtio*BusClass sizes
qemu.git: hw/virtio/virtio-pci.c
/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Paul Brook <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

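/* Deliver an interrupt to the guest: through the configured MSI-X vector
 * when MSI-X is enabled, otherwise through legacy INTx, whose level follows
 * bit 0 of the device ISR.
 */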
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

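/* The proxy keeps the modern (virtio 1.0) per-queue state itself, so it is
 * migrated in addition to the generic virtio device state: it travels as a
 * "virtio_pci" vmstate whose modern fields live in an optional subsection.
 */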
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

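/* Distance between the doorbells of two consecutive queues in the modern
 * notify region: a full page when page-per-vq is requested (so each doorbell
 * can be mapped on its own), 4 bytes otherwise.
 */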
static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

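/* Bind (or unbind) a queue's host notifier to the doorbell the guest
 * writes: for modern devices a per-queue slot in the notify region (with a
 * zero-length match when KVM supports any-length "fast MMIO" ioeventfds)
 * plus an optional PIO alias, and for legacy devices the
 * VIRTIO_PCI_QUEUE_NOTIFY ioport.
 */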
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

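/* Write handler for the legacy (virtio 0.9) I/O port register block. */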
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

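/* PCI config space write hook. On top of the default behaviour this tracks
 * bus master enable (clearing it disables the device and stops ioeventfd),
 * honours FLR, and implements the VIRTIO_PCI_CAP_PCI_CFG window: writes to
 * pci_cfg_data are forwarded into whichever modern region the capability's
 * offset/length currently select.
 */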
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

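/* KVM MSI routes are reference-counted per vector: the first queue using a
 * vector allocates a route (virq), queues sharing it only bump the count,
 * and the route is released again when the last user drops it.
 */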
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

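/* MSI-X poll callback: for each masked vector in the given range, propagate
 * a pending guest notification into the MSI-X pending bits so the guest
 * sees it on unmask.
 */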
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return proxy->vqs[n].enabled;
    }

    return virtio_queue_enabled_legacy(vdev, n);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

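/* MMIO accessors for the virtio 1.0 common configuration structure
 * (struct virtio_pci_common_cfg): feature negotiation, device status and
 * per-queue setup, demultiplexed by the offset within the common region.
 */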
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error
         * took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

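/* Modern notify doorbell: the queue index is derived from the write offset
 * for MMIO notifications, and from the value written for the PIO variant.
 */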
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = 0;

    if (vdev == NULL) {
        return val;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

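/* Map a modern region into its BAR and advertise it to the guest through a
 * vendor-specific PCI capability carrying (type, bar, offset, length).
 */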
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

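/* Called by virtio-bus just before the device is plugged: a modern proxy
 * must offer VIRTIO_F_VERSION_1 before feature negotiation starts.
 */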
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
        !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            error_setg(errp, "device is modern-only, use disable-legacy=on");
            return;
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx = 0;
    proxy->msix_bar_idx = 1;
    proxy->modern_io_bar_idx = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

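/*
 * Class init for the intermediate base type registered by
 * virtio_pci_types_register(): forward to the device-specific class_init
 * that was stashed in class_data.
 */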
static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;

    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}
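
/*
 * Illustrative sketch only -- the "virtio-foo" names and callbacks below
 * are hypothetical, not part of this file.  A device would typically
 * register its PCI types like this:
 *
 *     static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *         .base_name             = "virtio-foo-pci-base",
 *         .generic_name          = "virtio-foo-pci",
 *         .transitional_name     = "virtio-foo-pci-transitional",
 *         .non_transitional_name = "virtio-foo-pci-non-transitional",
 *         .instance_size         = sizeof(VirtIOFooPCI),
 *         .instance_init         = virtio_foo_pci_instance_init,
 *         .class_init            = virtio_foo_pci_class_init,
 *     };
 *
 *     static void virtio_foo_pci_register(void)
 *     {
 *         virtio_pci_types_register(&virtio_foo_pci_info);
 *     }
 *     type_init(virtio_foo_pci_register)
 *
 * This registers the abstract base type plus the generic, transitional
 * and non-transitional user-creatable variants.
 */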

unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning inside
     * the guest, so those users might as well manually set the number of
     * queues. There is no upper limit that can be applied automatically and
     * doing so arbitrarily would result in a sudden performance drop once the
     * threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1.  One
     * vector is needed for the config change interrupt, which leaves
     * PCI_MSIX_FLAGS_QSIZE vectors for virtqueues; the fixed virtqueues
     * must be subtracted from that budget as well.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /*
     * There is a limit to how many virtqueues a device can have.
     */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}
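
/*
 * Illustrative sketch only -- "conf->num_request_queues" and the single
 * fixed control queue are hypothetical.  A multiqueue device could size
 * its request queues as:
 *
 *     conf->num_request_queues = virtio_pci_optimal_num_queues(1);
 *
 * i.e. one queue per vCPU, capped by the MSI-X vector and
 * VIRTIO_QUEUE_MAX budgets after reserving the one fixed queue.
 */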

/* virtio-pci-bus */

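/*
 * Create the proxy's VirtioBusState in place: a TYPE_VIRTIO_PCI_BUS
 * named "virtio-bus" whose parent device is the proxy.
 */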
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)