From: Antonio Caggiano <antonio.caggiano@collabora.com>
Support BLOB resource creation, mapping and unmapping by calling the
new stable virglrenderer 0.10 interface. Only enabled when the
interface is available and the blob config is set, e.g.
-device virtio-vga-gl,blob=true
Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com>
Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
hw/display/virtio-gpu-virgl.c  | 310 ++++++++++++++++++++++++++++++++-
hw/display/virtio-gpu.c        |   4 +-
include/hw/virtio/virtio-gpu.h |   2 +
3 files changed, 312 insertions(+), 4 deletions(-)
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 7d4d2882a5af..63a5a983aad6 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -26,6 +26,18 @@
struct virtio_gpu_virgl_resource {
struct virtio_gpu_simple_resource base;
+ MemoryRegion *mr;
+
+ /*
+ * Used by virgl_cmd_resource_unref() to know whether async
+ * unmapping has been started. A blob can be either mapped or
+ * unmapped on unref, and we must not unmap a blob that was never
+ * mapped, because that is an error condition. This flag prevents
+ * performing step 3 of the async unmapping process described in the
+ * comment to virtio_gpu_virgl_async_unmap_resource_blob() if the
+ * blob wasn't mapped in the first place.
+ */
+ bool async_unmap_in_progress;
};
static struct virtio_gpu_virgl_resource *
@@ -49,6 +61,128 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
}
#endif
+#ifdef HAVE_VIRGL_RESOURCE_BLOB
+struct virtio_gpu_virgl_hostmem_region {
+ MemoryRegion mr;
+ struct VirtIOGPU *g;
+ struct virtio_gpu_virgl_resource *res;
+};
+
+static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
+{
+ VirtIOGPU *g = opaque;
+
+ virtio_gpu_process_cmdq(g);
+}
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+ MemoryRegion *mr = MEMORY_REGION(obj);
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+ VirtIOGPUBase *b;
+ VirtIOGPUGL *gl;
+
+ vmr = container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+ vmr->res->mr = NULL;
+
+ b = VIRTIO_GPU_BASE(vmr->g);
+ b->renderer_blocked--;
+
+ /*
+ * memory_region_unref() is executed from RCU thread context, while
+ * virglrenderer works only on the main-loop thread that holds the
+ * GL context.
+ */
+ gl = VIRTIO_GPU_GL(vmr->g);
+ qemu_bh_schedule(gl->cmdq_resume_bh);
+ g_free(vmr);
+}
+
+static int
+virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
+ struct virtio_gpu_virgl_resource *res,
+ uint64_t offset)
+{
+ struct virtio_gpu_virgl_hostmem_region *vmr;
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+ MemoryRegion *mr;
+ uint64_t size;
+ void *data;
+ int ret;
+
+ if (!virtio_gpu_hostmem_enabled(b->conf)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
+ vmr->res = res;
+ vmr->g = g;
+
+ mr = &vmr->mr;
+ memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+ memory_region_add_subregion(&b->hostmem, offset, mr);
+ memory_region_set_enabled(mr, true);
+
+ /*
+ * The MR could outlive the resource if its reference is held outside
+ * of virtio-gpu. To prevent unmapping the resource while the MR is
+ * alive, and thus making the data pointer invalid, block virtio-gpu
+ * command processing until the MR is fully unreferenced and freed.
+ */
+ OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
+
+ res->mr = mr;
+
+ return 0;
+}
+
+static int
+virtio_gpu_virgl_async_unmap_resource_blob(VirtIOGPU *g,
+ struct virtio_gpu_virgl_resource *res)
+{
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+ MemoryRegion *mr = res->mr;
+ int ret;
+
+ /*
+ * Perform async unmapping in 3 steps:
+ *
+ * 1. Begin async unmapping with memory_region_del_subregion()
+ * and suspend/block cmd processing.
+ * 2. Wait for res->mr to be freed and cmd processing resumed
+ * asynchronously by virtio_gpu_virgl_hostmem_region_free().
+ * 3. Finish the unmapping with a final virgl_renderer_resource_unmap().
+ */
+ if (mr) {
+ /* renderer will be unblocked once the MR is freed */
+ b->renderer_blocked++;
+
+ /* the self-owned memory region frees itself via its free() callback */
+ memory_region_set_enabled(mr, false);
+ memory_region_del_subregion(&b->hostmem, mr);
+ object_unparent(OBJECT(mr));
+ } else {
+ ret = virgl_renderer_resource_unmap(res->base.resource_id);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: failed to unmap virgl resource: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif /* HAVE_VIRGL_RESOURCE_BLOB */
+
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -146,12 +280,14 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
}
static void virgl_cmd_resource_unref(VirtIOGPU *g,
- struct virtio_gpu_ctrl_command *cmd)
+ struct virtio_gpu_ctrl_command *cmd,
+ bool *cmd_suspended)
{
struct virtio_gpu_resource_unref unref;
struct virtio_gpu_virgl_resource *res;
struct iovec *res_iovs = NULL;
int num_iovs = 0;
+ int ret;
VIRTIO_GPU_FILL_CMD(unref);
trace_virtio_gpu_cmd_res_unref(unref.resource_id);
@@ -164,6 +300,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
return;
}
+ if (res->mr || res->async_unmap_in_progress) {
+ ret = virtio_gpu_virgl_async_unmap_resource_blob(g, res);
+ if (ret) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ if (res->mr) {
+ res->async_unmap_in_progress = true;
+ *cmd_suspended = true;
+ return;
+ } else {
+ res->async_unmap_in_progress = false;
+ }
+ }
+
virgl_renderer_resource_detach_iov(unref.resource_id,
&res_iovs,
&num_iovs);
@@ -514,6 +666,137 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
}
#ifdef HAVE_VIRGL_RESOURCE_BLOB
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+ struct virtio_gpu_resource_create_blob cblob;
+ struct virtio_gpu_virgl_resource *res;
+ int ret;
+
+ if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ VIRTIO_GPU_FILL_CMD(cblob);
+ virtio_gpu_create_blob_bswap(&cblob);
+ trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+ if (cblob.resource_id == 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+ __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
+ if (res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+ __func__, cblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_virgl_resource, 1);
+ res->base.resource_id = cblob.resource_id;
+ res->base.blob_size = cblob.size;
+ res->base.dmabuf_fd = -1;
+
+ if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+ ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+ cmd, &res->base.addrs,
+ &res->base.iov, &res->base.iov_cnt);
+ if (ret) {
+ g_free(res);
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+ }
+
+ virgl_args.res_handle = cblob.resource_id;
+ virgl_args.ctx_id = cblob.hdr.ctx_id;
+ virgl_args.blob_mem = cblob.blob_mem;
+ virgl_args.blob_id = cblob.blob_id;
+ virgl_args.blob_flags = cblob.blob_flags;
+ virgl_args.size = cblob.size;
+ virgl_args.iovecs = res->base.iov;
+ virgl_args.num_iovs = res->base.iov_cnt;
+
+ ret = virgl_renderer_resource_create_blob(&virgl_args);
+ if (ret) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+ __func__, strerror(-ret));
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ virtio_gpu_cleanup_mapping(g, &res->base);
+ g_free(res);
+ return;
+ }
+
+ QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_map_blob mblob;
+ struct virtio_gpu_virgl_resource *res;
+ struct virtio_gpu_resp_map_info resp;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(mblob);
+ virtio_gpu_map_blob_bswap(&mblob);
+
+ res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, mblob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+ if (ret) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ memset(&resp, 0, sizeof(resp));
+ resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+ virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+ virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ bool *cmd_suspended)
+{
+ struct virtio_gpu_resource_unmap_blob ublob;
+ struct virtio_gpu_virgl_resource *res;
+ int ret;
+
+ VIRTIO_GPU_FILL_CMD(ublob);
+ virtio_gpu_unmap_blob_bswap(&ublob);
+
+ res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
+ if (!res) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+ __func__, ublob.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = virtio_gpu_virgl_async_unmap_resource_blob(g, res);
+ if (ret) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ if (res->mr) {
+ *cmd_suspended = true;
+ }
+}
+
static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -616,6 +899,8 @@ static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
+ bool cmd_suspended = false;
+
VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
virgl_renderer_force_ctx_0();
@@ -657,7 +942,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
virgl_cmd_resource_flush(g, cmd);
break;
case VIRTIO_GPU_CMD_RESOURCE_UNREF:
- virgl_cmd_resource_unref(g, cmd);
+ virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
break;
case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
/* TODO add security */
@@ -680,6 +965,15 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
virtio_gpu_get_edid(g, cmd);
break;
#ifdef HAVE_VIRGL_RESOURCE_BLOB
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+ virgl_cmd_resource_create_blob(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+ virgl_cmd_resource_map_blob(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+ virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
+ break;
case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
virgl_cmd_set_scanout_blob(g, cmd);
break;
@@ -689,7 +983,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
break;
}
- if (cmd->finished) {
+ if (cmd_suspended || cmd->finished) {
return;
}
if (cmd->error) {
@@ -827,6 +1121,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
{
int ret;
uint32_t flags = 0;
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
if (qemu_egl_display) {
@@ -854,6 +1149,11 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
virtio_gpu_print_stats, g);
timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
+
+ gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
+ virtio_gpu_virgl_resume_cmdq_bh,
+ g);
+
return 0;
}
@@ -869,6 +1169,10 @@ int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
void virtio_gpu_virgl_deinit(VirtIOGPU *g)
{
+ VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
+ qemu_bh_delete(gl->cmdq_resume_bh);
+
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
timer_free(g->print_stats);
}
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 6c8c7213bafa..052ab493a00b 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -1483,10 +1483,12 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
return;
}
+#ifndef HAVE_VIRGL_RESOURCE_BLOB
if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
- error_setg(errp, "blobs and virgl are not compatible (yet)");
+ error_setg(errp, "old virglrenderer, blob resources unsupported");
return;
}
+#endif
}
if (!virtio_gpu_base_device_realize(qdev,
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index a98cb47ca0fa..f3c8014acc80 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -231,6 +231,8 @@ struct VirtIOGPUGL {
bool renderer_inited;
bool renderer_reset;
bool renderer_init_failed;
+
+ QEMUBH *cmdq_resume_bh;
};
struct VhostUserGPU {
--
2.44.0
On 2024/05/20 6:27, Dmitry Osipenko wrote:
> From: Antonio Caggiano <antonio.caggiano@collabora.com>
>
> Support BLOB resource creation, mapping and unmapping by calling the
> new stable virglrenderer 0.10 interface. Only enabled when the
> interface is available and the blob config is set, e.g.
> -device virtio-vga-gl,blob=true
>
> [...]
> struct virtio_gpu_virgl_resource {
> struct virtio_gpu_simple_resource base;
> + MemoryRegion *mr;
> +
> + /*
> + * Used by virgl_cmd_resource_unref() to know whether async
> + * unmapping has been started. A blob can be either mapped or
> + * unmapped on unref, and we must not unmap a blob that was never
> + * mapped, because that is an error condition. This flag prevents
> + * performing step 3 of the async unmapping process described in the
> + * comment to virtio_gpu_virgl_async_unmap_resource_blob() if the
> + * blob wasn't mapped in the first place.
> + */
> + bool async_unmap_in_progress;
I suggest instead adding a field to virtio_gpu_virgl_hostmem_region
that tells whether the mr has been deleted, to minimize the size of
virtio_gpu_virgl_resource.
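For example, a rough sketch (the finish_unmapping name is only an
illustration, not an existing field):

struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    struct virtio_gpu_virgl_resource *res;
    bool finish_unmapping; /* set by the MR free() callback */
};

virtio_gpu_virgl_hostmem_region_free() would set finish_unmapping
instead of clearing res->mr, and the unref/unmap paths would reach the
flag through res->mr, so only mapped blobs carry the extra state.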
> };
>
> [...]
>
> @@ -164,6 +300,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
> return;
> }
>
> + if (res->mr || res->async_unmap_in_progress) {
> + ret = virtio_gpu_virgl_async_unmap_resource_blob(g, res);
> + if (ret) {
> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
> + return;
> + }
> +
> + if (res->mr) {
I suggest giving virtio_gpu_virgl_async_unmap_resource_blob() a
"suspended" parameter, since we have the same pattern also in
virgl_cmd_resource_unmap_blob(); see the sketch after this list.
You may also remove "async" from the name of
virtio_gpu_virgl_async_unmap_resource_blob() because:
- With a "suspended" parameter it's obvious the operation can be
asynchronous: it is asynchronous when the parameter becomes true.
- It completes the operation synchronously if the memory region has
already been deleted. Again, with a "suspended" parameter this is
obvious: it is synchronous when the parameter becomes false.
- You don't name virgl_cmd_resource_unref()
virgl_cmd_resource_async_unref() either.
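For example, a rough, untested sketch based on the function above,
with the name shortened as suggested:

static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *suspended)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (mr) {
        /* renderer will be unblocked once the MR is freed */
        b->renderer_blocked++;

        /* the self-owned MR frees itself via its free() callback */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));

        /* unmapping finishes asynchronously; suspend cmd processing */
        *suspended = true;
    } else {
        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    }

    return 0;
}

Both virgl_cmd_resource_unref() and virgl_cmd_resource_unmap_blob()
could then simply forward *suspended to *cmd_suspended.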
> + res->async_unmap_in_progress = true;
> + *cmd_suspended = true;
> + return;
> + } else {
> + res->async_unmap_in_progress = false;
> + }
> [...]
>
> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
> + struct virtio_gpu_ctrl_command *cmd)
> +{
> [...]
> + res = g_new0(struct virtio_gpu_virgl_resource, 1);
> + res->base.resource_id = cblob.resource_id;
> + res->base.blob_size = cblob.size;
> + res->base.dmabuf_fd = -1;
> +
> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
> + cmd, &res->base.addrs,
> + &res->base.iov, &res->base.iov_cnt);
> + if (ret) {
> + g_free(res);
Use g_autofree instead of writing duplicate g_free() calls. See
docs/devel/style.rst for details.
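For example, a rough sketch of the pattern in
virgl_cmd_resource_create_blob() (note the duplicate-lookup at the top
of the function would need its own variable, since that resource must
not be freed):

    g_autofree struct virtio_gpu_virgl_resource *res = NULL;

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    /* ... every early error return now frees res automatically ... */

    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
    res = NULL; /* ownership transferred to g->reslist */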