/* hw/s390x/event-facility.c */
/*
 * SCLP
 *    Event Facility
 *       handles SCLP event types
 *          - Signal Quiesce - system power down
 *          - ASCII Console Data - VT220 read and write
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Heinz Graalfs <graalfs@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */
17
18 #include "qemu/osdep.h"
19 #include "qapi/error.h"
20 #include "qemu/module.h"
21
22 #include "hw/s390x/sclp.h"
23 #include "migration/vmstate.h"
24 #include "hw/s390x/event-facility.h"
25
/* The bus that all SCLP event devices (quiesce, cpu hotplug, ...) sit on */
typedef struct SCLPEventsBus {
    BusState qbus;
} SCLPEventsBus;
29
/*
 * we need to save 32 bit chunks for compatibility: these indices say which
 * half of the 64 bit receive mask lives in which slot of
 * receive_mask_pieces[] (see the union in SCLPEventFacility below).
 */
#ifdef HOST_WORDS_BIGENDIAN
#define RECV_MASK_LOWER 1
#define RECV_MASK_UPPER 0
#else /* little endian host */
#define RECV_MASK_LOWER 0
#define RECV_MASK_UPPER 1
#endif
38
/*
 * State of the SCLP event facility: the events bus, the built-in event
 * devices, and the masks negotiated with the guest.
 */
struct SCLPEventFacility {
    SysBusDevice parent_obj;
    SCLPEventsBus sbus;
    /* built-in event devices, realized onto sbus */
    SCLPEvent quiesce, cpu_hotplug;
    /* guest's receive mask */
    union {
        /* 32 bit chunks of receive_mask, for migration compatibility */
        uint32_t receive_mask_pieces[2];
        sccb_mask_t receive_mask;
    };
    /*
     * when false, we keep the same broken, backwards compatible behaviour as
     * before, allowing only masks of size exactly 4; when true, we implement
     * the architecture correctly, allowing all valid mask sizes. Needed for
     * migration toward older versions.
     */
    bool allow_all_mask_sizes;
    /* length of the receive mask */
    uint16_t mask_length;
};
58
59 /* return true if any child has event pending set */
60 static bool event_pending(SCLPEventFacility *ef)
61 {
62 BusChild *kid;
63 SCLPEvent *event;
64 SCLPEventClass *event_class;
65
66 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
67 DeviceState *qdev = kid->child;
68 event = DO_UPCAST(SCLPEvent, qdev, qdev);
69 event_class = SCLP_EVENT_GET_CLASS(event);
70 if (event->event_pending &&
71 event_class->get_send_mask() & ef->receive_mask) {
72 return true;
73 }
74 }
75 return false;
76 }
77
78 static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef)
79 {
80 sccb_mask_t mask;
81 BusChild *kid;
82 SCLPEventClass *child;
83
84 mask = 0;
85
86 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
87 DeviceState *qdev = kid->child;
88 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
89 mask |= child->get_send_mask();
90 }
91 return mask;
92 }
93
94 static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef)
95 {
96 sccb_mask_t mask;
97 BusChild *kid;
98 SCLPEventClass *child;
99
100 mask = 0;
101
102 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
103 DeviceState *qdev = kid->child;
104 child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
105 mask |= child->get_receive_mask();
106 }
107 return mask;
108 }
109
110 static uint16_t write_event_length_check(SCCB *sccb)
111 {
112 int slen;
113 unsigned elen = 0;
114 EventBufferHeader *event;
115 WriteEventData *wed = (WriteEventData *) sccb;
116
117 event = (EventBufferHeader *) &wed->ebh;
118 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
119 elen = be16_to_cpu(event->length);
120 if (elen < sizeof(*event) || elen > slen) {
121 return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
122 }
123 event = (void *) event + elen;
124 }
125 if (slen) {
126 return SCLP_RC_INCONSISTENT_LENGTHS;
127 }
128 return SCLP_RC_NORMAL_COMPLETION;
129 }
130
131 static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
132 EventBufferHeader *event_buf, SCCB *sccb)
133 {
134 uint16_t rc;
135 BusChild *kid;
136 SCLPEvent *event;
137 SCLPEventClass *ec;
138
139 rc = SCLP_RC_INVALID_FUNCTION;
140
141 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
142 DeviceState *qdev = kid->child;
143 event = (SCLPEvent *) qdev;
144 ec = SCLP_EVENT_GET_CLASS(event);
145
146 if (ec->write_event_data &&
147 ec->can_handle_event(event_buf->type)) {
148 rc = ec->write_event_data(event, event_buf);
149 break;
150 }
151 }
152 return rc;
153 }
154
155 static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
156 {
157 uint16_t rc;
158 int slen;
159 unsigned elen = 0;
160 EventBufferHeader *event_buf;
161 WriteEventData *wed = (WriteEventData *) sccb;
162
163 event_buf = &wed->ebh;
164 rc = SCLP_RC_NORMAL_COMPLETION;
165
166 /* loop over all contained event buffers */
167 for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
168 elen = be16_to_cpu(event_buf->length);
169
170 /* in case of a previous error mark all trailing buffers
171 * as not accepted */
172 if (rc != SCLP_RC_NORMAL_COMPLETION) {
173 event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
174 } else {
175 rc = handle_write_event_buf(ef, event_buf, sccb);
176 }
177 event_buf = (void *) event_buf + elen;
178 }
179 return rc;
180 }
181
182 static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
183 {
184 if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
185 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
186 return;
187 }
188 if (be16_to_cpu(sccb->h.length) < 8) {
189 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
190 return;
191 }
192 /* first do a sanity check of the write events */
193 sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));
194
195 /* if no early error, then execute */
196 if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
197 sccb->h.response_code =
198 cpu_to_be16(handle_sccb_write_events(ef, sccb));
199 }
200 }
201
202 static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
203 sccb_mask_t mask)
204 {
205 uint16_t rc;
206 int slen;
207 unsigned elen;
208 BusChild *kid;
209 SCLPEvent *event;
210 SCLPEventClass *ec;
211 EventBufferHeader *event_buf;
212 ReadEventData *red = (ReadEventData *) sccb;
213
214 event_buf = &red->ebh;
215 event_buf->length = 0;
216 slen = sizeof(sccb->data);
217
218 rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;
219
220 QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
221 DeviceState *qdev = kid->child;
222 event = (SCLPEvent *) qdev;
223 ec = SCLP_EVENT_GET_CLASS(event);
224
225 if (mask & ec->get_send_mask()) {
226 if (ec->read_event_data(event, event_buf, &slen)) {
227 elen = be16_to_cpu(event_buf->length);
228 event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
229 rc = SCLP_RC_NORMAL_COMPLETION;
230 }
231 }
232 }
233
234 if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
235 /* architecture suggests to reset variable-length-response bit */
236 sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
237 /* with a new length value */
238 sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
239 }
240 return rc;
241 }
242
243 /* copy up to src_len bytes and fill the rest of dst with zeroes */
/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    uint16_t i = 0;

    for (; i < src_len && i < dst_len; i++) {
        dst[i] = src[i];
    }
    for (; i < dst_len; i++) {
        dst[i] = 0;
    }
}
253
254 static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
255 {
256 sccb_mask_t sclp_active_selection_mask;
257 sccb_mask_t sclp_cp_receive_mask;
258
259 ReadEventData *red = (ReadEventData *) sccb;
260
261 if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
262 sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
263 return;
264 }
265
266 switch (sccb->h.function_code) {
267 case SCLP_UNCONDITIONAL_READ:
268 sccb->h.response_code = cpu_to_be16(
269 handle_sccb_read_events(ef, sccb, ef->receive_mask));
270 break;
271 case SCLP_SELECTIVE_READ:
272 /* get active selection mask */
273 sclp_cp_receive_mask = ef->receive_mask;
274
275 copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
276 sizeof(sclp_active_selection_mask), ef->mask_length);
277 sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask);
278 if (!sclp_cp_receive_mask ||
279 (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
280 sccb->h.response_code =
281 cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
282 } else {
283 sccb->h.response_code = cpu_to_be16(
284 handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
285 }
286 break;
287 default:
288 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
289 }
290 }
291
292 static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
293 {
294 WriteEventMask *we_mask = (WriteEventMask *) sccb;
295 uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
296 sccb_mask_t tmp_mask;
297
298 if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
299 ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
300 sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
301 return;
302 }
303
304 /*
305 * Note: We currently only support masks up to 8 byte length;
306 * the remainder is filled up with zeroes. Older Linux
307 * kernels use a 4 byte mask length, newer ones can use both
308 * 8 or 4 depending on what is available on the host.
309 */
310
311 /* keep track of the guest's capability masks */
312 copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length),
313 sizeof(tmp_mask), mask_length);
314 ef->receive_mask = be64_to_cpu(tmp_mask);
315
316 /* return the SCLP's capability masks to the guest */
317 tmp_mask = cpu_to_be64(get_host_receive_mask(ef));
318 copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
319 mask_length, sizeof(tmp_mask));
320 tmp_mask = cpu_to_be64(get_host_send_mask(ef));
321 copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
322 mask_length, sizeof(tmp_mask));
323
324 sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
325 ef->mask_length = mask_length;
326 }
327
/* qemu object creation and initialization functions */

#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"

/* plain bus type: no extra state or behaviour beyond BusState */
static const TypeInfo sclp_events_bus_info = {
    .name = TYPE_SCLP_EVENTS_BUS,
    .parent = TYPE_BUS,
};
336
337 static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
338 {
339 switch (code & SCLP_CMD_CODE_MASK) {
340 case SCLP_CMD_READ_EVENT_DATA:
341 read_event_data(ef, sccb);
342 break;
343 case SCLP_CMD_WRITE_EVENT_DATA:
344 write_event_data(ef, sccb);
345 break;
346 case SCLP_CMD_WRITE_EVENT_MASK:
347 write_event_mask(ef, sccb);
348 break;
349 }
350 }
351
/*
 * The mask64 subsection is only migrated when the low 32 bits of the
 * receive mask are in use (the upper chunk travels in the main section).
 */
static bool vmstate_event_facility_mask64_needed(void *opaque)
{
    SCLPEventFacility *ef = opaque;

    return (ef->receive_mask & 0xFFFFFFFF) != 0;
}
358
/*
 * mask_length only needs to be migrated when non-4-byte masks are
 * allowed; otherwise it is always 4 on both sides.
 */
static bool vmstate_event_facility_mask_length_needed(void *opaque)
{
    SCLPEventFacility *ef = opaque;

    return ef->allow_all_mask_sizes;
}
365
/* optional subsection carrying the low 32-bit chunk of the receive mask */
static const VMStateDescription vmstate_event_facility_mask64 = {
    .name = "vmstate-event-facility/mask64",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask64_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};
376
/* optional subsection carrying the negotiated mask length */
static const VMStateDescription vmstate_event_facility_mask_length = {
    .name = "vmstate-event-facility/mask_length",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask_length_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(mask_length, SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};
387
/*
 * Main migration section: carries only the upper 32-bit chunk of the
 * receive mask for compatibility with older streams; the rest of the
 * state travels in the optional subsections.
 */
static const VMStateDescription vmstate_event_facility = {
    .name = "vmstate-event-facility",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_event_facility_mask64,
        &vmstate_event_facility_mask_length,
        NULL
    }
};
402
/* QOM setter for the "allow_all_mask_sizes" compatibility property */
static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value,
                                                Error **errp)
{
    SCLPEventFacility *ef = (SCLPEventFacility *)obj;

    ef->allow_all_mask_sizes = value;
}
410
/* QOM getter for the "allow_all_mask_sizes" compatibility property */
static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp)
{
    SCLPEventFacility *ef = (SCLPEventFacility *)obj;

    return ef->allow_all_mask_sizes;
}
417
/*
 * Instance init: set mask defaults, expose the "allow_all_mask_sizes"
 * property, create the SCLP events bus and pre-initialize the built-in
 * quiesce and cpu-hotplug children (realized in realize_event_facility()).
 */
static void init_event_facility(Object *obj)
{
    SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
    DeviceState *sdev = DEVICE(obj);

    /* default to 4-byte masks (older Linux guests) but accept any size */
    event_facility->mask_length = 4;
    event_facility->allow_all_mask_sizes = true;
    object_property_add_bool(obj, "allow_all_mask_sizes",
                             sclp_event_get_allow_all_mask_sizes,
                             sclp_event_set_allow_all_mask_sizes);

    /* Spawn a new bus for SCLP events */
    qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
                        TYPE_SCLP_EVENTS_BUS, sdev, NULL);

    object_initialize_child(obj, TYPE_SCLP_QUIESCE,
                            &event_facility->quiesce,
                            TYPE_SCLP_QUIESCE);

    object_initialize_child(obj, TYPE_SCLP_CPU_HOTPLUG,
                            &event_facility->cpu_hotplug,
                            TYPE_SCLP_CPU_HOTPLUG);
}
441
/*
 * Realize: attach the built-in event devices to the events bus.  If the
 * second child fails, the first is unrealized again so the device is
 * left in a consistent state; errp carries the failure.
 */
static void realize_event_facility(DeviceState *dev, Error **errp)
{
    SCLPEventFacility *event_facility = EVENT_FACILITY(dev);

    if (!qdev_realize(DEVICE(&event_facility->quiesce),
                      BUS(&event_facility->sbus), errp)) {
        return;
    }
    if (!qdev_realize(DEVICE(&event_facility->cpu_hotplug),
                      BUS(&event_facility->sbus), errp)) {
        /* roll back the already-realized quiesce child */
        qdev_unrealize(DEVICE(&event_facility->quiesce));
        return;
    }
}
456
/* Device reset: drop the guest's receive mask until it writes a new one */
static void reset_event_facility(DeviceState *dev)
{
    SCLPEventFacility *sdev = EVENT_FACILITY(dev);

    sdev->receive_mask = 0;
}
463
/* Class init: wire up realize/reset/vmstate and the SCLP entry points */
static void init_event_facility_class(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(sbdc);
    SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);

    dc->realize = realize_event_facility;
    dc->reset = reset_event_facility;
    dc->vmsd = &vmstate_event_facility;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    /* entry points used by the SCLP core (sclp.c) */
    k->command_handler = command_handler;
    k->event_pending = event_pending;
}
477
/* type registration record for the event facility sysbus device */
static const TypeInfo sclp_event_facility_info = {
    .name          = TYPE_SCLP_EVENT_FACILITY,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = init_event_facility,
    .instance_size = sizeof(SCLPEventFacility),
    .class_init    = init_event_facility_class,
    .class_size    = sizeof(SCLPEventFacilityClass),
};
486
487 static void event_realize(DeviceState *qdev, Error **errp)
488 {
489 SCLPEvent *event = SCLP_EVENT(qdev);
490 SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
491
492 if (child->init) {
493 int rc = child->init(event);
494 if (rc < 0) {
495 error_setg(errp, "SCLP event initialization failed.");
496 return;
497 }
498 }
499 }
500
/* Base class init for SCLP events: pin them to the events bus */
static void event_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->bus_type = TYPE_SCLP_EVENTS_BUS;
    dc->realize = event_realize;
}
508
/* abstract base type all concrete SCLP event devices derive from */
static const TypeInfo sclp_event_type_info = {
    .name = TYPE_SCLP_EVENT,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCLPEvent),
    .class_init = event_class_init,
    .class_size = sizeof(SCLPEventClass),
    .abstract = true,
};
517
/* Register the bus, facility, and abstract event types with QOM */
static void register_types(void)
{
    type_register_static(&sclp_events_bus_info);
    type_register_static(&sclp_event_facility_info);
    type_register_static(&sclp_event_type_info);
}

type_init(register_types)
526
/*
 * Look up the (single) SCLP events bus in the QOM composition tree.
 * Returns NULL if no event facility — and hence no bus — exists.
 */
BusState *sclp_get_event_facility_bus(void)
{
    Object *busobj;
    SCLPEventsBus *sbus;

    busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL);
    sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS);
    if (!sbus) {
        return NULL;
    }

    return &sbus->qbus;
}