Merge remote-tracking branch 'remotes/stefanha/tags/tracing-pull-request' into staging
[qemu.git] / hw / virtio / virtio-mmio.c
1 /*
2 * Virtio MMIO bindings
3 *
4 * Copyright (c) 2011 Linaro Limited
5 *
6 * Author:
7 * Peter Maydell <peter.maydell@linaro.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "qemu/osdep.h"
23 #include "standard-headers/linux/virtio_mmio.h"
24 #include "hw/sysbus.h"
25 #include "hw/virtio/virtio.h"
26 #include "qemu/host-utils.h"
27 #include "sysemu/kvm.h"
28 #include "hw/virtio/virtio-bus.h"
29 #include "qemu/error-report.h"
30
/* #define DEBUG_VIRTIO_MMIO */

#ifdef DEBUG_VIRTIO_MMIO

/* Debug tracing: compiled away entirely unless DEBUG_VIRTIO_MMIO is set. */
#define DPRINTF(fmt, ...) \
do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Values returned by the MAGIC_VALUE, VERSION and VENDOR_ID registers. */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
59
/*
 * Proxy device exposing a virtio backend through the legacy (version 1)
 * virtio-mmio register layout.  Field order of the migrated members must
 * match virtio_mmio_save_config()/virtio_mmio_load_config().
 */
typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;            /* 0x200-byte MMIO register window */
    qemu_irq irq;                  /* level-triggered interrupt line */
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;    /* DEVICE_FEATURES_SEL; only bank 0 is non-zero */
    uint32_t guest_features_sel;   /* DRIVER_FEATURES_SEL */
    uint32_t guest_page_shift;     /* log2 of the GUEST_PAGE_SIZE the guest wrote */
    /* virtio-bus */
    VirtioBusState bus;            /* bus the backend device plugs into */
    bool format_transport_address; /* append MMIO base address to the dev path */
} VirtIOMMIOProxy;
73
/*
 * ioeventfd acceleration is only available when the host KVM supports
 * eventfd signalling; the DeviceState argument is unused.
 */
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}
78
79 static int virtio_mmio_ioeventfd_assign(DeviceState *d,
80 EventNotifier *notifier,
81 int n, bool assign)
82 {
83 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
84
85 if (assign) {
86 memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
87 true, n, notifier);
88 } else {
89 memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
90 true, n, notifier);
91 }
92 return 0;
93 }
94
/* Start ioeventfd handling for all queues on this transport's bus. */
static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}
99
/* Stop ioeventfd handling for all queues on this transport's bus. */
static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
104
/*
 * Guest read from the virtio-mmio register window.
 *
 * Offsets at or above VIRTIO_MMIO_CONFIG are forwarded to the backend's
 * config space and may be 1, 2 or 4 bytes wide; everything below is a
 * transport register and must be a 4-byte access (other sizes read as 0).
 */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            /* The MemoryRegionOps contract only allows 1/2/4 byte accesses. */
            abort();
        }
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        /* Legacy transport: only feature bank 0 has bits; any other
         * selected bank reads as zero. */
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        /* A queue with zero size does not exist; report 0 for it. */
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Legacy layout: the ring address is reported as a page frame
         * number in units of the guest-programmed page size. */
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
        DPRINTF("read of write-only register\n");
        return 0;
    default:
        DPRINTF("bad register offset\n");
        return 0;
    }
    return 0;
}
193
/*
 * Guest write to the virtio-mmio register window.
 *
 * Offsets at or above VIRTIO_MMIO_CONFIG go to the backend's config space
 * (1/2/4-byte accesses); everything below is a 4-byte transport register.
 * Writes are silently ignored when no backend is plugged in, when the
 * access size is wrong, or when the register is read-only.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            /* The MemoryRegionOps contract only allows 1/2/4 byte accesses. */
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        /* Legacy transport: only feature bank 0 is accepted. */
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        /* Remember the page size as a shift for QUEUE_PFN conversions;
         * a non-power-of-two or zero value degenerates to shift 0. */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        /* Out-of-range queue selectors are silently ignored. */
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        /* Writing PFN 0 is the legacy way of resetting the device. */
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        /* Clear the acknowledged ISR bits and re-evaluate the IRQ line. */
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        /* Stop ioeventfd before DRIVER_OK is cleared, start it after it
         * is set, so notifications are never lost across the switch. */
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        /* Writing status 0 resets the device. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        DPRINTF("write to readonly register\n");
        break;

    default:
        DPRINTF("bad register offset\n");
    }
}
313
/* Register window accessors; legacy virtio-mmio uses guest-native endianness. */
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
319
320 static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
321 {
322 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
323 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
324 int level;
325
326 if (!vdev) {
327 return;
328 }
329 level = (atomic_read(&vdev->isr) != 0);
330 DPRINTF("virtio_mmio setting IRQ %d\n", level);
331 qemu_set_irq(proxy->irq, level);
332 }
333
/*
 * Restore the transport's migrated registers from the stream.  The field
 * order must match virtio_mmio_save_config() exactly.
 */
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}
343
/*
 * Save the transport's migrated registers to the stream.  The field order
 * must match virtio_mmio_load_config() exactly.
 */
static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
352
/*
 * Device reset: quiesce ioeventfd first, then reset the bus (and the
 * plugged backend with it), and finally clear the transport's own
 * guest-visible registers.
 */
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}
363
/*
 * Assign or release the guest notifier (the eventfd used to inject the
 * interrupt) for queue @n.  Returns 0 on success or a negative errno from
 * event_notifier_init() on failure.
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        /* Detach the fd handler before tearing the notifier down. */
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Let the backend (un)mask its own notifications if it supports it. */
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
390
/*
 * Assign or release guest notifiers for the first @nvqs queues, stopping
 * at the first unconfigured (zero-sized) queue.  On assignment failure the
 * queues already set up are rolled back.  Returns 0 on success or the
 * negative errno of the failing assignment.
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
423
424 /* virtio-mmio device */
425
static Property virtio_mmio_properties[] = {
    /* When true, qdev device paths include the transport's MMIO base
     * address (see virtio_mmio_bus_get_dev_path()). */
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_END_OF_LIST(),
};
431
/*
 * Realize the transport: create the virtio bus the backend plugs into,
 * and export the IRQ line and the MMIO register window to the sysbus.
 */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    /* 0x200 bytes covers the whole legacy virtio-mmio register layout. */
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}
444
445 static void virtio_mmio_class_init(ObjectClass *klass, void *data)
446 {
447 DeviceClass *dc = DEVICE_CLASS(klass);
448
449 dc->realize = virtio_mmio_realizefn;
450 dc->reset = virtio_mmio_reset;
451 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
452 dc->props = virtio_mmio_properties;
453 }
454
/* QOM registration data for the virtio-mmio proxy (a sysbus device). */
static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};
461
462 /* virtio-mmio-bus. */
463
/*
 * Build the qdev device path for a device on the virtio-mmio bus.
 * Returns a newly allocated string the caller must g_free().
 */
static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}
503
504 static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
505 {
506 BusClass *bus_class = BUS_CLASS(klass);
507 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
508
509 k->notify = virtio_mmio_update_irq;
510 k->save_config = virtio_mmio_save_config;
511 k->load_config = virtio_mmio_load_config;
512 k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
513 k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
514 k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
515 k->has_variable_vring_alignment = true;
516 bus_class->max_dev = 1;
517 bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
518 }
519
/* QOM registration data for the virtio-mmio bus type. */
static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};
526
/* Register both QOM types with the type system at module init. */
static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)