/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

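/*
 * Map a virtio-gpu command code to its symbolic name, for debug logging.
 * Out-of-range codes return "unknown".
 */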
static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

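/*
 * Blocking read from the vhost-user-gpu channel socket, retrying on
 * EINTR/EAGAIN.  A short read indicates a broken channel and is flagged
 * with g_warn_if_fail().
 */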
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

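/*
 * Suspend control-queue processing until the frontend acknowledges the
 * outstanding VHOST_USER_GPU_DMABUF_UPDATE; source_wait_cb() resumes it.
 */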
void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

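/*
 * Write a message to the channel socket, passing an optional file
 * descriptor as SCM_RIGHTS ancillary data (fd == -1 sends none).
 * Retries on EINTR/EAGAIN.
 */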
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

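/*
 * Receive a reply from the frontend: read the header fields, validate
 * request/flags/size against what the caller expects, then read the
 * payload.  A read failure closes the channel and returns false.
 */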
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

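/*
 * Write a response into the command's in-iovec, propagating the fence id
 * when the guest requested one, then push the element back onto the
 * virtqueue, notify the guest and mark the command finished.
 */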
void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

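/*
 * GET_DISPLAY_INFO is asynchronous: vg_get_display_info() sends the
 * request and parks the command on fenceq; this callback fires when the
 * reply arrives, completes the parked command and resumes control-queue
 * processing.
 */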
static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

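/*
 * Allocate a 2D resource, backed by a vugbm buffer (dmabuf-capable when a
 * DRM render node is in use) and wrapped in a pixman image for software
 * blits.
 */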
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

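/*
 * Translate the guest-physical scatter list that follows the
 * attach_backing header into host iovecs via vu_gpa_to_va().  The entry
 * count is capped at 16384 to bound the allocation.
 */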
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_new0(struct iovec, ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* For now this only frees the iov; more cleanup may be added here later. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

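/*
 * Copy guest backing pages into the host pixman image.  The whole image
 * is copied in one go when the transfer starts at offset 0 and covers
 * full-width rows from the origin; otherwise the requested rectangle is
 * copied row by row.
 */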
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

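/*
 * Associate a resource with a scanout.  When the backing vugbm buffer can
 * export a dmabuf fd, send VHOST_USER_GPU_DMABUF_SCANOUT with the fd
 * attached; otherwise fall back to the plain VHOST_USER_GPU_SCANOUT
 * message and inline pixel updates.
 */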
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

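/*
 * Flush a resource: for each scanout showing it, intersect the flush
 * rectangle with the scanout and send either a DMABUF_UPDATE (then wait
 * for the frontend's ack) or an inline UPDATE carrying the pixel data.
 */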
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    /*     break; */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

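/*
 * Control-queue handler.  Pops commands until the queue is empty or a
 * command suspends processing (wait_in != 0).  Commands not finished
 * synchronously are kept on fenceq until their reply arrives.
 */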
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

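/*
 * Forward cursor commands to the frontend: MOVE_CURSOR becomes a
 * CURSOR_POS (or CURSOR_POS_HIDE when resource_id is 0), UPDATE_CURSOR a
 * CURSOR_UPDATE carrying the 64x64 32-bit cursor pixels.
 */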
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

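/*
 * Protocol-feature handshake on the channel socket installed by
 * VHOST_USER_GPU_SET_SOCKET: query the frontend's features, then reply
 * with the (currently empty) set we use.  Runs asynchronously, so the
 * control queue stays suspended until the exchange completes.
 */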
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return G_SOURCE_CONTINUE;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

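/*
 * Set up the device: parse options, open the optional DRM render node,
 * obtain the vhost-user socket (by path or inherited fd), hand it to
 * libvhost-user-glib, then run the GLib main loop.
 */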
int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}