virtio-gpu: fix memory leak in virtio_gpu_resource_create_2d
hw/display/virtio-gpu.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

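/*
 * VIRGL() dispatches a command handler either to the virglrenderer-based
 * implementation or to the simple 2D one, depending on whether the guest
 * negotiated VIRTIO_GPU_F_VIRGL.  Without CONFIG_VIRGL only the simple
 * path is compiled in.
 */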
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        /* data is malloc'ed by virglrenderer; don't leak it on this path */
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

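/*
 * Cursor handling: the guest posts VIRTIO_GPU_CMD_UPDATE_CURSOR and
 * VIRTIO_GPU_CMD_MOVE_CURSOR on the cursor queue.  The host-side cursor
 * is allocated at a fixed 64x64 (cursor_alloc below), so the data-update
 * helpers above silently ignore resources of any other size.
 */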
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

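/*
 * Response helpers: every control command gets exactly one response.  If
 * the request carried VIRTIO_GPU_FLAG_FENCE, the fence id and context id
 * are echoed back so the guest can match completions to submissions.
 */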
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

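/*
 * Map virtio-gpu formats to pixman formats.  The virtio-gpu names are
 * byte-order based (first byte in memory named first) while pixman formats
 * are native-endian words, so the mapping flips on big-endian hosts.  A
 * return value of 0 means the format is unsupported.
 */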
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

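/*
 * Note the error handling below: once 'res' is allocated it must be freed
 * on every failure path until it is linked into g->reslist.  The leak this
 * commit fixes was a missing g_free(res) on the unsupported-format path.
 */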
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

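/*
 * Copy guest backing pages into the host-side pixman image.  A full-image
 * transfer (offset 0, full width) is done with a single iov_to_buf() call;
 * partial rectangles are copied line by line using the image stride.
 */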
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

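/*
 * Flush a rectangle of a resource to every scanout it is bound to.  The
 * flush rectangle is intersected with each scanout's rectangle and
 * translated into scanout-local coordinates before the display update.
 */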
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

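/*
 * Bind (or unbind, when resource_id == 0) a resource to a scanout.  The
 * display surface wraps the resource's pixman image directly; an extra
 * reference plus a destroy callback (virtio_unref_resource above) keeps
 * the image alive for as long as the surface uses it.
 */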
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

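/*
 * Map the guest pages named in an attach_backing command into host iovecs.
 * nr_entries is guest-controlled, so it is capped (16384 entries) before
 * any allocation is sized from it; on any mapping failure everything
 * mapped so far is unwound again.
 */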
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

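/*
 * Drain the command queue.  Commands that complete without having sent a
 * response yet (fenced commands) move to g->fenceq and are counted in
 * g->inflight; a command flagged as waiting stops processing until the
 * renderer unblocks (see virtio_gpu_gl_block below).
 */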
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

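/*
 * Device save/load: resources are streamed as (id, geometry, backing page
 * list, raw pixel data) records terminated by a zero resource id, followed
 * by the scanout vmstate.  Load recreates each pixman image and remaps the
 * guest backing pages before rebinding scanouts.
 */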
static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

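/*
 * Realize: virgl is only usable when QEMU was built with CONFIG_VIRGL, the
 * host is little-endian, and an OpenGL-capable display is active; in all
 * other cases the virgl flag is cleared and the device runs in plain 2D
 * mode with a smaller control queue.
 */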
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker);
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to virtio migration
 * scheme as described in doc/virtio-migration.txt, in a sense that no
 * save/load callback are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);