meson: convert hw/vfio
[qemu.git] / hw / s390x / virtio-ccw.c
1 /*
2 * virtio ccw target implementation
3 *
4 * Copyright 2012,2015 IBM Corp.
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Pierre Morel <pmorel@linux.vnet.ibm.com>
7 *
8 * This work is licensed under the terms of the GNU GPL, version 2 or (at
9 * your option) any later version. See the COPYING file in the top-level
10 * directory.
11 */
12
13 #include "qemu/osdep.h"
14 #include "qapi/error.h"
15 #include "sysemu/kvm.h"
16 #include "net/net.h"
17 #include "hw/virtio/virtio.h"
18 #include "migration/qemu-file-types.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "hw/sysbus.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/module.h"
24 #include "hw/virtio/virtio-access.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "hw/s390x/adapter.h"
27 #include "hw/s390x/s390_flic.h"
28
29 #include "hw/s390x/ioinst.h"
30 #include "hw/s390x/css.h"
31 #include "virtio-ccw.h"
32 #include "trace.h"
33 #include "hw/s390x/css-bridge.h"
34 #include "hw/s390x/s390-virtio-ccw.h"
35
36 #define NR_CLASSIC_INDICATOR_BITS 64
37
38 static int virtio_ccw_dev_post_load(void *opaque, int version_id)
39 {
40 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque);
41 CcwDevice *ccw_dev = CCW_DEVICE(dev);
42 CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
43
44 ccw_dev->sch->driver_data = dev;
45 if (ccw_dev->sch->thinint_active) {
46 dev->routes.adapter.adapter_id = css_get_adapter_id(
47 CSS_IO_ADAPTER_VIRTIO,
48 dev->thinint_isc);
49 }
50 /* Re-fill subch_id after loading the subchannel states.*/
51 if (ck->refill_ids) {
52 ck->refill_ids(ccw_dev);
53 }
54 return 0;
55 }
56
/*
 * Helper structure for VMSTATE_WITH_TMP: carries the wrapped
 * VirtIODevice's config_vector across migration, since VirtIODevice
 * does not migrate it itself as part of this proxy's state.
 */
typedef struct VirtioCcwDeviceTmp {
    VirtioCcwDevice *parent;   /* filled in by VMSTATE_WITH_TMP */
    uint16_t config_vector;    /* snapshot of vdev->config_vector */
} VirtioCcwDeviceTmp;
61
62 static int virtio_ccw_dev_tmp_pre_save(void *opaque)
63 {
64 VirtioCcwDeviceTmp *tmp = opaque;
65 VirtioCcwDevice *dev = tmp->parent;
66 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
67
68 tmp->config_vector = vdev->config_vector;
69
70 return 0;
71 }
72
73 static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id)
74 {
75 VirtioCcwDeviceTmp *tmp = opaque;
76 VirtioCcwDevice *dev = tmp->parent;
77 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
78
79 vdev->config_vector = tmp->config_vector;
80 return 0;
81 }
82
/* Sub-description migrating only config_vector via VirtioCcwDeviceTmp. */
const VMStateDescription vmstate_virtio_ccw_dev_tmp = {
    .name = "s390_virtio_ccw_dev_tmp",
    .pre_save = virtio_ccw_dev_tmp_pre_save,
    .post_load = virtio_ccw_dev_tmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp),
        VMSTATE_END_OF_LIST()
    }
};
92
/* Full migration description for the virtio-ccw proxy device. */
const VMStateDescription vmstate_virtio_ccw_dev = {
    .name = "s390_virtio_ccw_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = virtio_ccw_dev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice),
        /* Indicator pointers are migrated by guest address. */
        VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice),
        VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice),
        /*
         * Ugly hack because VirtIODevice does not migrate itself.
         * This also makes legacy via vmstate_save_state possible.
         */
        VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp,
                         vmstate_virtio_ccw_dev_tmp),
        VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes,
                       AdapterRoutes),
        VMSTATE_UINT8(thinint_isc, VirtioCcwDevice),
        VMSTATE_INT32(revision, VirtioCcwDevice),
        VMSTATE_END_OF_LIST()
    }
};
116
117 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
118 VirtioCcwDevice *dev);
119
120 VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
121 {
122 VirtIODevice *vdev = NULL;
123 VirtioCcwDevice *dev = sch->driver_data;
124
125 if (dev) {
126 vdev = virtio_bus_get_device(&dev->bus);
127 }
128 return vdev;
129 }
130
/* Start ioeventfd-based queue notification handling on the proxy's bus. */
static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_start_ioeventfd(&dev->bus);
}
135
/* Stop ioeventfd-based queue notification handling on the proxy's bus. */
static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
    virtio_bus_stop_ioeventfd(&dev->bus);
}
140
/* ioeventfd is enabled iff the device's flag has not been cleared. */
static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);

    return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}
147
/*
 * (Un)assign the ioeventfd for queue n. The kernel identifies the
 * subchannel by a 32 bit id: subchannel id in the high half, schid in
 * the low half.
 */
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;

    return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}
158
159 /* Communication blocks used by several channel commands. */
/* Virtqueue location info, legacy (pre-virtio-1) layout. */
typedef struct VqInfoBlockLegacy {
    uint64_t queue;   /* guest address of the ring */
    uint32_t align;   /* ring alignment; only 4096 is accepted */
    uint16_t index;   /* queue index */
    uint16_t num;     /* number of ring entries */
} QEMU_PACKED VqInfoBlockLegacy;

/* Virtqueue location info, virtio-1 layout with split ring parts. */
typedef struct VqInfoBlock {
    uint64_t desc;    /* guest address of the descriptor area */
    uint32_t res0;    /* reserved */
    uint16_t index;   /* queue index */
    uint16_t num;     /* number of ring entries */
    uint64_t avail;   /* guest address of the driver (avail) area */
    uint64_t used;    /* guest address of the device (used) area */
} QEMU_PACKED VqInfoBlock;

/* Reply block for CCW_CMD_READ_VQ_CONF. */
typedef struct VqConfigBlock {
    uint16_t index;
    uint16_t num_max; /* maximum ring size for this queue */
} QEMU_PACKED VqConfigBlock;

/* One 32 bit window of the 64+ bit feature space. */
typedef struct VirtioFeatDesc {
    uint32_t features;
    uint8_t index;    /* which 32 bit window (0 or 1) */
} QEMU_PACKED VirtioFeatDesc;

/* Payload for CCW_CMD_SET_IND_ADAPTER (thin interrupt setup). */
typedef struct VirtioThinintInfo {
    hwaddr summary_indicator;
    hwaddr device_indicator;
    uint64_t ind_bit; /* bit offset of this device's indicators */
    uint8_t isc;      /* interruption subclass */
} QEMU_PACKED VirtioThinintInfo;

/* Payload for CCW_CMD_SET_VIRTIO_REV; data[] is currently unused. */
typedef struct VirtioRevInfo {
    uint16_t revision;
    uint16_t length;  /* length of additional data */
    uint8_t data[];
} QEMU_PACKED VirtioRevInfo;
198
199 /* Specify where the virtqueues for the subchannel are in guest memory. */
static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
                              VqInfoBlockLegacy *linfo)
{
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    /* Exactly one of info (virtio-1) / linfo (legacy) is non-NULL. */
    uint16_t index = info ? info->index : linfo->index;
    uint16_t num = info ? info->num : linfo->num;
    uint64_t desc = info ? info->desc : linfo->queue;

    if (index >= VIRTIO_QUEUE_MAX) {
        return -EINVAL;
    }

    /* Current code in virtio.c relies on 4K alignment. */
    if (linfo && desc && (linfo->align != 4096)) {
        return -EINVAL;
    }

    if (!vdev) {
        return -EINVAL;
    }

    if (info) {
        /* virtio-1: the three ring areas are specified separately. */
        virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
    } else {
        /* Legacy: a single contiguous ring at the given address. */
        virtio_queue_set_addr(vdev, index, desc);
    }
    if (!desc) {
        /* Address 0 disables the queue and its notifications. */
        virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
    } else {
        if (info) {
            /* virtio-1 allows changing the ring size. */
            if (virtio_queue_get_max_num(vdev, index) < num) {
                /* Fail if we exceed the maximum number. */
                return -EINVAL;
            }
            virtio_queue_set_num(vdev, index, num);
        } else if (virtio_queue_get_num(vdev, index) > num) {
            /* Fail if we don't have a big enough queue. */
            return -EINVAL;
        }
        /* We ignore possible increased num for legacy for compatibility. */
        virtio_queue_set_vector(vdev, index, index);
    }
    /* tell notify handler in case of config change */
    vdev->config_vector = VIRTIO_QUEUE_MAX;
    return 0;
}
247
/*
 * Reset the wrapped virtio device and drop all indicator state,
 * deactivating thin interrupts for the subchannel.
 */
static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    /* Quiesce ioeventfd handling before tearing down state. */
    virtio_ccw_stop_ioeventfd(dev);
    virtio_reset(vdev);
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
    if (dev->indicators2) {
        release_indicator(&dev->routes.adapter, dev->indicators2);
        dev->indicators2 = NULL;
    }
    if (dev->summary_indicator) {
        release_indicator(&dev->routes.adapter, dev->summary_indicator);
        dev->summary_indicator = NULL;
    }
    ccw_dev->sch->thinint_active = false;
}
268
/*
 * Handle CCW_CMD_SET_VQ: read the (legacy or virtio-1) virtqueue info
 * block from the channel data stream and apply it via
 * virtio_ccw_set_vqs(). All multi-byte payload fields are big endian.
 */
static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
                                    bool is_legacy)
{
    int ret;
    VqInfoBlock info;
    VqInfoBlockLegacy linfo;
    size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);

    if (check_len) {
        /* Exact length required unless SLI was specified. */
        if (ccw.count != info_len) {
            return -EINVAL;
        }
    } else if (ccw.count < info_len) {
        /* Can't execute command. */
        return -EINVAL;
    }
    if (!ccw.cda) {
        return -EFAULT;
    }
    if (is_legacy) {
        ccw_dstream_read(&sch->cds, linfo);
        linfo.queue = be64_to_cpu(linfo.queue);
        linfo.align = be32_to_cpu(linfo.align);
        linfo.index = be16_to_cpu(linfo.index);
        linfo.num = be16_to_cpu(linfo.num);
        ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
    } else {
        ccw_dstream_read(&sch->cds, info);
        info.desc = be64_to_cpu(info.desc);
        info.index = be16_to_cpu(info.index);
        info.num = be16_to_cpu(info.num);
        info.avail = be64_to_cpu(info.avail);
        info.used = be64_to_cpu(info.used);
        ret = virtio_ccw_set_vqs(sch, &info, NULL);
    }
    /* Everything was consumed. */
    sch->curr_status.scsw.count = 0;
    return ret;
}
307
/*
 * Interpret one channel command word for a virtio-ccw device.
 *
 * Returns 0 on success, -EINVAL for malformed commands, -EFAULT for a
 * missing data address, and -ENOSYS to trigger a command reject.
 * Residual byte counts are reported back via curr_status.scsw.count.
 */
static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
{
    int ret;
    VirtioRevInfo revinfo;
    uint8_t status;
    VirtioFeatDesc features;
    hwaddr indicators;
    VqConfigBlock vq_config;
    VirtioCcwDevice *dev = sch->driver_data;
    VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
    bool check_len;
    int len;
    VirtioThinintInfo thinint;

    if (!dev) {
        return -EINVAL;
    }

    trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
                                   ccw.cmd_code);
    /* Enforce exact lengths unless suppress-length-indication is set. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (dev->force_revision_1 && dev->revision < 0 &&
        ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) {
        /*
         * virtio-1 drivers must start with negotiating to a revision >= 1,
         * so post a command reject for all other commands
         */
        return -ENOSYS;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_SET_VQ:
        ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
        break;
    case CCW_CMD_VDEV_RESET:
        virtio_ccw_reset_virtio(dev, vdev);
        ret = 0;
        break;
    case CCW_CMD_READ_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            /* The index follows the features field in the payload. */
            ccw_dstream_advance(&sch->cds, sizeof(features.features));
            ccw_dstream_read(&sch->cds, features.index);
            if (features.index == 0) {
                if (dev->revision >= 1) {
                    /* Don't offer legacy features for modern devices. */
                    features.features = (uint32_t)
                        (vdev->host_features & ~vdc->legacy_features);
                } else {
                    features.features = (uint32_t)vdev->host_features;
                }
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * Only offer feature bits beyond 31 if the guest has
                 * negotiated at least revision 1.
                 */
                features.features = (uint32_t)(vdev->host_features >> 32);
            } else {
                /* Return zeroes if the guest supports more feature bits. */
                features.features = 0;
            }
            /* Rewind so the features are written at the payload start. */
            ccw_dstream_rewind(&sch->cds);
            features.features = cpu_to_le32(features.features);
            ccw_dstream_write(&sch->cds, features.features);
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_FEAT:
        if (check_len) {
            if (ccw.count != sizeof(features)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(features)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, features);
            features.features = le32_to_cpu(features.features);
            if (features.index == 0) {
                /* Replace only the low 32 feature bits. */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0xffffffff00000000ULL) |
                                    features.features);
            } else if ((features.index == 1) && (dev->revision >= 1)) {
                /*
                 * If the guest did not negotiate at least revision 1,
                 * we did not offer it any feature bits beyond 31. Such a
                 * guest passing us any bit here is therefore buggy.
                 */
                virtio_set_features(vdev,
                                    (vdev->guest_features & 0x00000000ffffffffULL) |
                                    ((uint64_t)features.features << 32));
            } else {
                /*
                 * If the guest supports more feature bits, assert that it
                 * passes us zeroes for those we don't support.
                 */
                if (features.features) {
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "Guest bug: features[%i]=%x (expected 0)",
                                  features.index, features.features);
                    /* XXX: do a unit check here? */
                }
            }
            sch->curr_status.scsw.count = ccw.count - sizeof(features);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        /* Partial reads of the config space are allowed. */
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Refresh the cached config before copying it out. */
            virtio_bus_get_vdev_config(&dev->bus, vdev->config);
            ccw_dstream_write_buf(&sch->cds, vdev->config, len);
            sch->curr_status.scsw.count = ccw.count - len;
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_CONF:
        if (check_len) {
            if (ccw.count > vdev->config_len) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, vdev->config_len);
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
            if (!ret) {
                /* Push the updated config down to the device. */
                virtio_bus_set_vdev_config(&dev->bus, vdev->config);
                sch->curr_status.scsw.count = ccw.count - len;
            }
        }
        break;
    case CCW_CMD_READ_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Single byte; stored directly rather than via the dstream. */
            address_space_stb(&address_space_memory, ccw.cda, vdev->status,
                              MEMTXATTRS_UNSPECIFIED, NULL);
            sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
            ret = 0;
        }
        break;
    case CCW_CMD_WRITE_STATUS:
        if (check_len) {
            if (ccw.count != sizeof(status)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(status)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, status);
            if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* Leaving DRIVER_OK: stop ioeventfd processing first. */
                virtio_ccw_stop_ioeventfd(dev);
            }
            if (virtio_set_status(vdev, status) == 0) {
                if (vdev->status == 0) {
                    /* Status 0 means a full device reset. */
                    virtio_ccw_reset_virtio(dev, vdev);
                }
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                    virtio_ccw_start_ioeventfd(dev);
                }
                sch->curr_status.scsw.count = ccw.count - sizeof(status);
                ret = 0;
            } else {
                /* Trigger a command reject. */
                ret = -ENOSYS;
            }
        }
        break;
    case CCW_CMD_SET_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (sch->thinint_active) {
            /* Trigger a command reject. */
            ret = -ENOSYS;
            break;
        }
        if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) {
            /* More queues than indicator bits --> trigger a reject */
            ret = -ENOSYS;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_CONF_IND:
        if (check_len) {
            if (ccw.count != sizeof(indicators)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(indicators)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            /* Indicators for configuration change notifications. */
            ccw_dstream_read(&sch->cds, indicators);
            indicators = be64_to_cpu(indicators);
            dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
            sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
            ret = 0;
        }
        break;
    case CCW_CMD_READ_VQ_CONF:
        if (check_len) {
            if (ccw.count != sizeof(vq_config)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(vq_config)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else {
            ccw_dstream_read(&sch->cds, vq_config.index);
            vq_config.index = be16_to_cpu(vq_config.index);
            if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                ret = -EINVAL;
                break;
            }
            /* Answer with the queue's maximum ring size. */
            vq_config.num_max = virtio_queue_get_num(vdev,
                                                     vq_config.index);
            vq_config.num_max = cpu_to_be16(vq_config.num_max);
            ccw_dstream_write(&sch->cds, vq_config.num_max);
            sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
            ret = 0;
        }
        break;
    case CCW_CMD_SET_IND_ADAPTER:
        if (check_len) {
            if (ccw.count != sizeof(thinint)) {
                ret = -EINVAL;
                break;
            }
        } else if (ccw.count < sizeof(thinint)) {
            /* Can't execute command. */
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
        } else if (dev->indicators && !sch->thinint_active) {
            /* Classic indicators already set up: trigger a command reject. */
            ret = -ENOSYS;
        } else {
            if (ccw_dstream_read(&sch->cds, thinint)) {
                ret = -EFAULT;
            } else {
                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
                thinint.summary_indicator =
                    be64_to_cpu(thinint.summary_indicator);
                thinint.device_indicator =
                    be64_to_cpu(thinint.device_indicator);

                dev->summary_indicator =
                    get_indicator(thinint.summary_indicator, sizeof(uint8_t));
                dev->indicators =
                    get_indicator(thinint.device_indicator,
                                  thinint.ind_bit / 8 + 1);
                dev->thinint_isc = thinint.isc;
                dev->routes.adapter.ind_offset = thinint.ind_bit;
                dev->routes.adapter.summary_offset = 7;
                dev->routes.adapter.adapter_id = css_get_adapter_id(
                                                 CSS_IO_ADAPTER_VIRTIO,
                                                 dev->thinint_isc);
                /* Thin interrupts need both indicator areas mapped. */
                sch->thinint_active = ((dev->indicators != NULL) &&
                                       (dev->summary_indicator != NULL));
                sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
                ret = 0;
            }
        }
        break;
    case CCW_CMD_SET_VIRTIO_REV:
        len = sizeof(revinfo);
        if (ccw.count < len) {
            ret = -EINVAL;
            break;
        }
        if (!ccw.cda) {
            ret = -EFAULT;
            break;
        }
        /* Read only the fixed revision + length fields (4 bytes). */
        ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
        revinfo.revision = be16_to_cpu(revinfo.revision);
        revinfo.length = be16_to_cpu(revinfo.length);
        if (ccw.count < len + revinfo.length ||
            (check_len && ccw.count > len + revinfo.length)) {
            ret = -EINVAL;
            break;
        }
        /*
         * Once we start to support revisions with additional data, we'll
         * need to fetch it here. Nothing to do for now, though.
         */
        if (dev->revision >= 0 ||
            revinfo.revision > virtio_ccw_rev_max(dev) ||
            (dev->force_revision_1 && !revinfo.revision)) {
            /* Revision can only be negotiated once, and must be valid. */
            ret = -ENOSYS;
            break;
        }
        ret = 0;
        dev->revision = revinfo.revision;
        break;
    default:
        ret = -ENOSYS;
        break;
    }
    return ret;
}
685
/* Subchannel disable callback: force a fresh revision negotiation. */
static void virtio_sch_disable_cb(SubchDev *sch)
{
    VirtioCcwDevice *dev = sch->driver_data;

    dev->revision = -1;
}
692
/*
 * Realize the virtio-ccw proxy: create and initialize its subchannel,
 * then chain to the device-specific and base-class realize hooks.
 * On failure the subchannel assignment is rolled back and freed.
 */
static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
    VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
    SubchDev *sch;
    Error *err = NULL;
    int i;

    sch = css_create_sch(ccw_dev->devno, errp);
    if (!sch) {
        return;
    }
    if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) {
        /* force_revision_1 is incompatible with a legacy-only device. */
        error_setg(&err, "Invalid value of property max_rev "
                   "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
        goto out_err;
    }

    /* Wire the subchannel up to this proxy device. */
    sch->driver_data = dev;
    sch->ccw_cb = virtio_ccw_cb;
    sch->disable_cb = virtio_sch_disable_cb;
    sch->id.reserved = 0xff;
    sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
    sch->do_subchannel_work = do_subchannel_work_virtual;
    ccw_dev->sch = sch;
    dev->indicators = NULL;
    dev->revision = -1;     /* no revision negotiated yet */
    for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) {
        dev->routes.gsi[i] = -1;
    }
    css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);

    trace_virtio_ccw_new_device(
        sch->cssid, sch->ssid, sch->schid, sch->devno,
        ccw_dev->devno.valid ? "user-configured" : "auto-configured");

    /* Without kernel eventfd support, ioeventfd cannot be used. */
    if (kvm_enabled() && !kvm_eventfds_enabled()) {
        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
    }

    /* Device-specific realize (e.g. virtio-net-ccw). */
    if (k->realize) {
        k->realize(dev, &err);
        if (err) {
            goto out_err;
        }
    }

    /* Base CCW device realize. */
    ck->realize(ccw_dev, &err);
    if (err) {
        goto out_err;
    }

    return;

out_err:
    error_propagate(errp, err);
    /* Undo the subchannel assignment made by css_create_sch(). */
    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
    ccw_dev->sch = NULL;
    g_free(sch);
}
754
/*
 * Unrealize the proxy: device-specific teardown first, then release
 * the subchannel and any remaining classic indicators.
 */
static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev)
{
    VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;

    if (dc->unrealize) {
        dc->unrealize(dev);
    }

    if (sch) {
        /* Remove the subchannel from the css and free it. */
        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
        g_free(sch);
        ccw_dev->sch = NULL;
    }
    if (dev->indicators) {
        release_indicator(&dev->routes.adapter, dev->indicators);
        dev->indicators = NULL;
    }
}
775
776 /* DeviceState to VirtioCcwDevice. Note: used on datapath,
777 * be careful and test performance if you change this.
778 */
779 static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
780 {
781 CcwDevice *ccw_dev = to_ccw_dev_fast(d);
782
783 return container_of(ccw_dev, VirtioCcwDevice, parent_obj);
784 }
785
/*
 * Atomically OR to_be_set into the guest indicator byte at ind_loc.
 * Returns the previous value of the byte, or -1 (0xff) if the guest
 * address could not be mapped.
 */
static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
                                     uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        error_report("%s(%x.%x.%04x): unable to access indicator",
                     __func__, sch->cssid, sch->ssid, sch->schid);
        return -1;
    }
    actual = *ind_addr;
    /* CAS loop: retry until no other writer races with us. */
    do {
        expected = actual;
        actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}
810
/*
 * Deliver a guest notification for the given vector: either set the
 * proper bit in the classic/adapter indicators and raise an I/O or
 * adapter interrupt. Runs on the datapath.
 */
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
    VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
    CcwDevice *ccw_dev = to_ccw_dev_fast(d);
    SubchDev *sch = ccw_dev->sch;
    uint64_t indicators;

    if (vector == VIRTIO_NO_VECTOR) {
        return;
    }
    /*
     * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue
     * vector == VIRTIO_QUEUE_MAX: configuration change notification
     * bits beyond that are unused and should never be notified for
     */
    assert(vector <= VIRTIO_QUEUE_MAX);

    if (vector < VIRTIO_QUEUE_MAX) {
        if (!dev->indicators) {
            return;
        }
        if (sch->thinint_active) {
            /*
             * In the adapter interrupt case, indicators points to a
             * memory area that may be (way) larger than 64 bit and
             * ind_bit indicates the start of the indicators in a big
             * endian notation.
             */
            uint64_t ind_bit = dev->routes.adapter.ind_offset;

            virtio_set_ind_atomic(sch, dev->indicators->addr +
                                  (ind_bit + vector) / 8,
                                  0x80 >> ((ind_bit + vector) % 8));
            /* Only interrupt if the summary bit was not already set. */
            if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
                                       0x01)) {
                css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc);
            }
        } else {
            /* Classic indicators: one bit per queue in a 64 bit word. */
            assert(vector < NR_CLASSIC_INDICATOR_BITS);
            indicators = address_space_ldq(&address_space_memory,
                                           dev->indicators->addr,
                                           MEMTXATTRS_UNSPECIFIED,
                                           NULL);
            indicators |= 1ULL << vector;
            address_space_stq(&address_space_memory, dev->indicators->addr,
                              indicators, MEMTXATTRS_UNSPECIFIED, NULL);
            css_conditional_io_interrupt(sch);
        }
    } else {
        /* Configuration change: bit 0 of the secondary indicators. */
        if (!dev->indicators2) {
            return;
        }
        indicators = address_space_ldq(&address_space_memory,
                                       dev->indicators2->addr,
                                       MEMTXATTRS_UNSPECIFIED,
                                       NULL);
        indicators |= 1ULL;
        address_space_stq(&address_space_memory, dev->indicators2->addr,
                          indicators, MEMTXATTRS_UNSPECIFIED, NULL);
        css_conditional_io_interrupt(sch);
    }
}
873
874 static void virtio_ccw_reset(DeviceState *d)
875 {
876 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
877 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
878 VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
879
880 virtio_ccw_reset_virtio(dev, vdev);
881 if (vdc->parent_reset) {
882 vdc->parent_reset(d);
883 }
884 }
885
886 static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
887 {
888 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
889
890 if (running) {
891 virtio_ccw_start_ioeventfd(dev);
892 } else {
893 virtio_ccw_stop_ioeventfd(dev);
894 }
895 }
896
897 static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
898 {
899 CcwDevice *dev = CCW_DEVICE(d);
900
901 return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
902 }
903
/*
 * Map the summary and device indicator areas for adapter interrupts
 * and record their host addresses in the adapter routes. Only valid
 * while thin interrupts are active.
 */
static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
    int r;
    CcwDevice *ccw_dev = CCW_DEVICE(dev);

    if (!ccw_dev->sch->thinint_active) {
        return -EINVAL;
    }

    r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
    if (r) {
        return r;
    }
    r = map_indicator(&dev->routes.adapter, dev->indicators);
    if (r) {
        return r;
    }
    dev->routes.adapter.summary_addr = dev->summary_indicator->map;
    dev->routes.adapter.ind_addr = dev->indicators->map;

    return 0;
}
926
/*
 * Set up adapter interrupt routes for up to nvqs queues, stopping at
 * the first unconfigured queue, and register them with the flic.
 */
static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
    int i;
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = s390_get_flic_class(fs);

    ret = virtio_ccw_get_mappings(dev);
    if (ret) {
        return ret;
    }
    /* Count the configured queues; queue size 0 terminates the list. */
    for (i = 0; i < nvqs; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }
    dev->routes.num_routes = i;
    return fsc->add_adapter_routes(fs, &dev->routes);
}
947
948 static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
949 {
950 S390FLICState *fs = s390_get_flic();
951 S390FLICStateClass *fsc = s390_get_flic_class(fs);
952
953 fsc->release_adapter_routes(fs, &dev->routes);
954 }
955
956 static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
957 {
958 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
959 VirtQueue *vq = virtio_get_queue(vdev, n);
960 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
961
962 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
963 dev->routes.gsi[n]);
964 }
965
966 static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
967 {
968 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
969 VirtQueue *vq = virtio_get_queue(vdev, n);
970 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
971 int ret;
972
973 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
974 dev->routes.gsi[n]);
975 assert(ret == 0);
976 }
977
/*
 * (Un)assign the guest notifier for queue n, optionally backed by an
 * irqfd. Because channel devices have no per-queue masking, the
 * device's masking callbacks are invoked manually here.
 */
static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
                                         bool assign, bool with_irqfd)
{
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (assign) {
        int r = event_notifier_init(notifier, 0);

        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
        if (with_irqfd) {
            r = virtio_ccw_add_irqfd(dev, n);
            if (r) {
                /* Roll back the fd handler on irqfd failure. */
                virtio_queue_set_guest_notifier_fd_handler(vq, false,
                                                           with_irqfd);
                return r;
            }
        }
        /*
         * We do not support individual masking for channel devices, so we
         * need to manually trigger any guest masking callbacks here.
         */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, false);
        }
        /* get lost events and re-inject */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, n)) {
            event_notifier_set(notifier);
        }
    } else {
        /* Mask first, then tear down irqfd, fd handler and notifier. */
        if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
            k->guest_notifier_mask(vdev, n, true);
        }
        if (with_irqfd) {
            virtio_ccw_remove_irqfd(dev, n);
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }
    return 0;
}
1025
/*
 * (Un)assign guest notifiers for all configured queues. irqfds are
 * used when thin interrupts are active and the kernel supports them;
 * routes must exist before irqfds are assigned and are released only
 * after all irqfds are gone. Partial failures are rolled back.
 */
static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
                                          bool assigned)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    CcwDevice *ccw_dev = CCW_DEVICE(d);
    bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled();
    int r, n;

    if (with_irqfd && assigned) {
        /* irq routes need to be set up before assigning irqfds */
        r = virtio_ccw_setup_irqroutes(dev, nvqs);
        if (r < 0) {
            goto irqroute_error;
        }
    }
    for (n = 0; n < nvqs; n++) {
        /* Queue size 0 marks the end of the configured queues. */
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    if (with_irqfd && !assigned) {
        /* release irq routes after irqfds have been released */
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return 0;

assign_error:
    /* Undo the notifiers assigned so far. */
    while (--n >= 0) {
        virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
    }
irqroute_error:
    if (with_irqfd && assigned) {
        virtio_ccw_release_irqroutes(dev, nvqs);
    }
    return r;
}
1067
1068 static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
1069 {
1070 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1071 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1072
1073 qemu_put_be16(f, virtio_queue_vector(vdev, n));
1074 }
1075
1076 static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
1077 {
1078 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1079 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1080 uint16_t vector;
1081
1082 qemu_get_be16s(f, &vector);
1083 virtio_queue_set_vector(vdev, n , vector);
1084
1085 return 0;
1086 }
1087
/* Save the proxy state through its vmstate description (legacy path). */
static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
}
1093
/* Load the proxy state; version 1 is the only existing format. */
static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
}
1099
1100 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
1101 {
1102 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1103 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1104
1105 if (dev->max_rev >= 1) {
1106 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1107 }
1108 }
1109
1110 /* This is called by virtio-bus just after the device is plugged. */
1111 static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
1112 {
1113 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1114 VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
1115 CcwDevice *ccw_dev = CCW_DEVICE(d);
1116 SubchDev *sch = ccw_dev->sch;
1117 int n = virtio_get_num_queues(vdev);
1118 S390FLICState *flic = s390_get_flic();
1119
1120 if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
1121 dev->max_rev = 0;
1122 }
1123
1124 if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) {
1125 error_setg(errp, "Invalid value of property max_rev "
1126 "(is %d expected >= 1)", virtio_ccw_rev_max(dev));
1127 return;
1128 }
1129
1130 if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) {
1131 error_setg(errp, "The number of virtqueues %d "
1132 "exceeds virtio limit %d", n,
1133 VIRTIO_QUEUE_MAX);
1134 return;
1135 }
1136 if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) {
1137 error_setg(errp, "The number of virtqueues %d "
1138 "exceeds flic adapter route limit %d", n,
1139 flic->adapter_routes_max_batch);
1140 return;
1141 }
1142
1143 sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
1144
1145
1146 css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
1147 d->hotplugged, 1);
1148 }
1149
1150 static void virtio_ccw_device_unplugged(DeviceState *d)
1151 {
1152 VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
1153
1154 virtio_ccw_stop_ioeventfd(dev);
1155 }
1156 /**************** Virtio-ccw Bus Device Descriptions *******************/
1157
1158 static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
1159 {
1160 VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1161
1162 virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
1163 virtio_ccw_device_realize(_dev, errp);
1164 }
1165
1166 static void virtio_ccw_busdev_unrealize(DeviceState *dev)
1167 {
1168 VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
1169
1170 virtio_ccw_device_unrealize(_dev);
1171 }
1172
1173 static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
1174 DeviceState *dev, Error **errp)
1175 {
1176 VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev);
1177
1178 virtio_ccw_stop_ioeventfd(_dev);
1179 }
1180
1181 static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
1182 {
1183 DeviceClass *dc = DEVICE_CLASS(klass);
1184 CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
1185 VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
1186
1187 k->unplug = virtio_ccw_busdev_unplug;
1188 dc->realize = virtio_ccw_busdev_realize;
1189 dc->unrealize = virtio_ccw_busdev_unrealize;
1190 dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
1191 device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
1192 }
1193
/*
 * Abstract base type for all virtio-ccw devices; concrete devices
 * (virtio-net-ccw, virtio-blk-ccw, ...) derive from this.
 */
static const TypeInfo virtio_ccw_device_info = {
    .name = TYPE_VIRTIO_CCW_DEVICE,
    .parent = TYPE_CCW_DEVICE,
    .instance_size = sizeof(VirtioCcwDevice),
    .class_init = virtio_ccw_device_class_init,
    .class_size = sizeof(VirtIOCCWDeviceClass),
    .abstract = true,
};
1202
1203 /* virtio-ccw-bus */
1204
1205 static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
1206 VirtioCcwDevice *dev)
1207 {
1208 DeviceState *qdev = DEVICE(dev);
1209 char virtio_bus_name[] = "virtio-bus";
1210
1211 qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
1212 qdev, virtio_bus_name);
1213 }
1214
1215 static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
1216 {
1217 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1218 BusClass *bus_class = BUS_CLASS(klass);
1219
1220 bus_class->max_dev = 1;
1221 k->notify = virtio_ccw_notify;
1222 k->vmstate_change = virtio_ccw_vmstate_change;
1223 k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
1224 k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
1225 k->save_queue = virtio_ccw_save_queue;
1226 k->load_queue = virtio_ccw_load_queue;
1227 k->save_config = virtio_ccw_save_config;
1228 k->load_config = virtio_ccw_load_config;
1229 k->pre_plugged = virtio_ccw_pre_plugged;
1230 k->device_plugged = virtio_ccw_device_plugged;
1231 k->device_unplugged = virtio_ccw_device_unplugged;
1232 k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
1233 k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
1234 }
1235
/* Concrete bus type hosting the single virtio device of a ccw transport. */
static const TypeInfo virtio_ccw_bus_info = {
    .name = TYPE_VIRTIO_CCW_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioCcwBusState),
    .class_init = virtio_ccw_bus_class_init,
};
1242
1243 static void virtio_ccw_register(void)
1244 {
1245 type_register_static(&virtio_ccw_bus_info);
1246 type_register_static(&virtio_ccw_device_info);
1247 }
1248
1249 type_init(virtio_ccw_register)