[RFC v3] virtio/vsock: add two more queues for datagram types
Posted by Jiang Wang 2 years, 10 months ago
Datagram sockets are connectionless and unreliable.
The sender does not know the capacity of the receiver
and may send more packets than the receiver can handle.

Add two more dedicated virtqueues for datagram sockets,
so that they do not unfairly steal resources from
stream and future connection-oriented sockets.
---
v1 -> v2: use QEMU cmd option to control the number of queues,
        removed configuration settings for dgram.
v2 -> v3: use ioctl to get features and decide the number of
       virtqueues, instead of a QEMU cmd option.

btw: this patch is based on Arseny's SEQPACKET patch.

 hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
 hw/virtio/vhost-vsock.c                       |  3 ++
 include/hw/virtio/vhost-vsock-common.h        |  3 +-
 include/hw/virtio/vhost-vsock.h               |  4 ++
 include/standard-headers/linux/virtio_vsock.h |  3 ++
 5 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 4ad6e234ad..8164e09445 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -17,6 +17,8 @@
 #include "hw/virtio/vhost-vsock.h"
 #include "qemu/iov.h"
 #include "monitor/monitor.h"
+#include <sys/ioctl.h>
+#include <linux/vhost.h>
 
 int vhost_vsock_common_start(VirtIODevice *vdev)
 {
@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
     return 0;
 }
 
+static int vhost_vsock_get_max_qps(void)
+{
+    uint64_t features;
+    int ret;
+    int fd = -1;
+
+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
+    if (fd == -1) {
+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
+        return -1;
+    }
+
+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
+    if (ret) {
+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
+        qemu_close(fd);
+        return ret;
+    }
+
+    qemu_close(fd);
+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
+        return MAX_VQS_WITH_DGRAM;
+
+    return MAX_VQS_WITHOUT_DGRAM;
+}
+
 void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
 
     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
                 sizeof(struct virtio_vsock_config));
@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                        vhost_vsock_common_handle_output);
 
+    nvqs = vhost_vsock_get_max_qps();
+
+    if (nvqs < 0)
+        nvqs = MAX_VQS_WITHOUT_DGRAM;
+
+    if (nvqs == MAX_VQS_WITH_DGRAM) {
+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                              vhost_vsock_common_handle_output);
+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                               vhost_vsock_common_handle_output);
+    }
+
     /* The event queue belongs to QEMU */
     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                        vhost_vsock_common_handle_output);
 
-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
+    vvc->vhost_dev.nvqs = nvqs;
+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
 
     vvc->post_load_timer = NULL;
 }
@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
 
     virtio_delete_queue(vvc->recv_vq);
     virtio_delete_queue(vvc->trans_vq);
+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
+        virtio_delete_queue(vvc->dgram_recv_vq);
+        virtio_delete_queue(vvc->dgram_trans_vq);
+    }
+
+    if (vvc->vhost_dev.vqs)
+        g_free(vvc->vhost_dev.vqs);
+
     virtio_delete_queue(vvc->event_vq);
     virtio_cleanup(vdev);
 }
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 1b1a5c70ed..33bbe16983 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -23,6 +23,7 @@
 
 const int feature_bits[] = {
     VIRTIO_VSOCK_F_SEQPACKET,
+    VIRTIO_VSOCK_F_DGRAM,
     VHOST_INVALID_FEATURE_BIT
 };
 
@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
 
     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
     return vhost_get_features(&vvc->vhost_dev, feature_bits,
                                 requested_features);
 }
diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
index e412b5ee98..798715241f 100644
--- a/include/hw/virtio/vhost-vsock-common.h
+++ b/include/hw/virtio/vhost-vsock-common.h
@@ -27,12 +27,13 @@ enum {
 struct VHostVSockCommon {
     VirtIODevice parent;
 
-    struct vhost_virtqueue vhost_vqs[2];
     struct vhost_dev vhost_dev;
 
     VirtQueue *event_vq;
     VirtQueue *recv_vq;
     VirtQueue *trans_vq;
+    VirtQueue *dgram_recv_vq;
+    VirtQueue *dgram_trans_vq;
 
     QEMUTimer *post_load_timer;
 };
diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
index 84f4e727c7..e10319785d 100644
--- a/include/hw/virtio/vhost-vsock.h
+++ b/include/hw/virtio/vhost-vsock.h
@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
 typedef struct {
     uint64_t guest_cid;
     char *vhostfd;
+    bool enable_dgram;
 } VHostVSockConf;
 
 struct VHostVSock {
@@ -33,4 +34,7 @@ struct VHostVSock {
     /*< public >*/
 };
 
+#define MAX_VQS_WITHOUT_DGRAM 2
+#define MAX_VQS_WITH_DGRAM 4
+
 #endif /* QEMU_VHOST_VSOCK_H */
diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
index 5eac522ee2..6ff8c5084c 100644
--- a/include/standard-headers/linux/virtio_vsock.h
+++ b/include/standard-headers/linux/virtio_vsock.h
@@ -41,6 +41,9 @@
 /* The feature bitmap for virtio vsock */
 #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET supported */
 
+/* Feature bits */
+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
+
 struct virtio_vsock_config {
 	uint64_t guest_cid;
 } QEMU_PACKED;
-- 
2.11.0


Re: [RFC v3] virtio/vsock: add two more queues for datagram types
Posted by Stefano Garzarella 2 years, 10 months ago
On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
>Datagram sockets are connectionless and unreliable.
>The sender does not know the capacity of the receiver
>and may send more packets than the receiver can handle.
>
>Add two more dedicate virtqueues for datagram sockets,
>so that it will not unfairly steal resources from
>stream and future connection-oriented sockets.
>---
>v1 -> v2: use qemu cmd option to control number of queues,
>        removed configuration settings for dgram.
>v2 -> v3: use ioctl to get features and decie numbr of
>       virt queues, instead of qemu cmd option.
>
>btw: this patch is based on Arseny's SEQPACKET patch.
>
> hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> hw/virtio/vhost-vsock.c                       |  3 ++
> include/hw/virtio/vhost-vsock-common.h        |  3 +-
> include/hw/virtio/vhost-vsock.h               |  4 ++
> include/standard-headers/linux/virtio_vsock.h |  3 ++
> 5 files changed, 63 insertions(+), 3 deletions(-)
>
>diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
>index 4ad6e234ad..8164e09445 100644
>--- a/hw/virtio/vhost-vsock-common.c
>+++ b/hw/virtio/vhost-vsock-common.c
>@@ -17,6 +17,8 @@
> #include "hw/virtio/vhost-vsock.h"
> #include "qemu/iov.h"
> #include "monitor/monitor.h"
>+#include <sys/ioctl.h>
>+#include <linux/vhost.h>
>
> int vhost_vsock_common_start(VirtIODevice *vdev)
> {
>@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
>     return 0;
> }
>
>+static int vhost_vsock_get_max_qps(void)
>+{
>+    uint64_t features;
>+    int ret;
>+    int fd = -1;
>+
>+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
>+    if (fd == -1) {
>+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
>+        return -1;
>+    }

You should use the `vhostfd` already opened in
vhost_vsock_device_realize(), since QEMU may not have permission to
access the device, and the file descriptor can be passed in from the
management layer.
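
Roughly something like this, as a sketch (the reworked signature is an
assumption, not a final interface):

    /* Hypothetical rework: query features on the descriptor passed in by
     * the management layer instead of reopening /dev/vhost-vsock. */
    static int vhost_vsock_get_max_qps(int vhostfd)
    {
        uint64_t features;

        if (ioctl(vhostfd, VHOST_GET_FEATURES, &features)) {
            error_report("vhost-vsock: failed to get features: %s",
                         strerror(errno));
            return -1;
        }

        return (features & (1ULL << VIRTIO_VSOCK_F_DGRAM)) ?
               MAX_VQS_WITH_DGRAM : MAX_VQS_WITHOUT_DGRAM;
    }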

>+
>+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
>+    if (ret) {
>+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
>+        qemu_close(fd);
>+        return ret;
>+    }
>+
>+    qemu_close(fd);
>+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
>+        return MAX_VQS_WITH_DGRAM;
>+
>+    return MAX_VQS_WITHOUT_DGRAM;
>+}
>+
> void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> {
>     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
>
>     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
>                 sizeof(struct virtio_vsock_config));
>@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
>     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>                                        vhost_vsock_common_handle_output);
>
>+    nvqs = vhost_vsock_get_max_qps();

You can't do this here, since the vhost-vsock-common.c functions are
also used by vhost-user-vsock, which doesn't use the /dev/vhost-vsock
device because the device is emulated in a separate user process.

I think you can use something similar to what you did in v2, where you
passed a parameter to vhost_vsock_common_realize() to enable the
datagram queues or not.
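
For example (just a sketch; the parameter name is an assumption):

    /* Hypothetical signature: the caller decides whether the dgram queues
     * exist, so vhost-user-vsock can simply pass false. */
    void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name,
                                    bool enable_dgram)
    {
        VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
        int nvqs = enable_dgram ? MAX_VQS_WITH_DGRAM : MAX_VQS_WITHOUT_DGRAM;

        /* ... existing recv/trans queue setup ... */

        if (enable_dgram) {
            vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                                  vhost_vsock_common_handle_output);
            vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                                   vhost_vsock_common_handle_output);
        }

        /* ... event queue, vhost_dev.nvqs = nvqs, etc. ... */
    }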

>+
>+    if (nvqs < 0)
>+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>+
>+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                              vhost_vsock_common_handle_output);
>+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                               vhost_vsock_common_handle_output);
>+    }
>+
>     /* The event queue belongs to QEMU */
>     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>                                        vhost_vsock_common_handle_output);

Did you test a guest that doesn't support datagram with a QEMU and host
that do?

Let me repost the thoughts I had on v2:

     What happens if the guest doesn't support dgram?

     I think we should dynamically use the 3rd queue or the 5th queue for
     the events at runtime, after the guest has acked the features.

     Maybe it would be better to switch to an array of VirtQueue.

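To illustrate the array idea (just a sketch; the index names are
assumptions):

    /* Hypothetical layout: keep the data queues in one array so the event
     * queue slot (3rd or 5th) can be chosen after feature negotiation. */
    enum {
        VSOCK_VQ_RX = 0,
        VSOCK_VQ_TX = 1,
        VSOCK_VQ_DGRAM_RX = 2,
        VSOCK_VQ_DGRAM_TX = 3,
    };

    struct VHostVSockCommon {
        VirtIODevice parent;
        struct vhost_dev vhost_dev;

        VirtQueue *vqs[MAX_VQS_WITH_DGRAM]; /* data queues */
        VirtQueue *event_vq;                /* placed after the data queues */

        QEMUTimer *post_load_timer;
    };
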
>
>-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
>-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
>+    vvc->vhost_dev.nvqs = nvqs;
>+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
>
>     vvc->post_load_timer = NULL;
> }
>@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
>
>     virtio_delete_queue(vvc->recv_vq);
>     virtio_delete_queue(vvc->trans_vq);
>+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
>+        virtio_delete_queue(vvc->dgram_recv_vq);
>+        virtio_delete_queue(vvc->dgram_trans_vq);
>+    }
>+
>+    if (vvc->vhost_dev.vqs)

g_free() already handles NULL pointers, so you can remove this check.
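
I.e., the unrealize path could simply do (minimal sketch):

    g_free(vvc->vhost_dev.vqs);   /* g_free(NULL) is a no-op */
    vvc->vhost_dev.vqs = NULL;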

>+        g_free(vvc->vhost_dev.vqs);
>+
>     virtio_delete_queue(vvc->event_vq);
>     virtio_cleanup(vdev);
> }
>diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
>index 1b1a5c70ed..33bbe16983 100644
>--- a/hw/virtio/vhost-vsock.c
>+++ b/hw/virtio/vhost-vsock.c
>@@ -23,6 +23,7 @@
>
> const int feature_bits[] = {
>     VIRTIO_VSOCK_F_SEQPACKET,
>+    VIRTIO_VSOCK_F_DGRAM,
>     VHOST_INVALID_FEATURE_BIT
> };
>
>@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
>     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>
>     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
>+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
>+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
>     return vhost_get_features(&vvc->vhost_dev, feature_bits,
>                                 requested_features);
> }
>diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
>index e412b5ee98..798715241f 100644
>--- a/include/hw/virtio/vhost-vsock-common.h
>+++ b/include/hw/virtio/vhost-vsock-common.h
>@@ -27,12 +27,13 @@ enum {
> struct VHostVSockCommon {
>     VirtIODevice parent;
>
>-    struct vhost_virtqueue vhost_vqs[2];
>     struct vhost_dev vhost_dev;
>
>     VirtQueue *event_vq;
>     VirtQueue *recv_vq;
>     VirtQueue *trans_vq;
>+    VirtQueue *dgram_recv_vq;
>+    VirtQueue *dgram_trans_vq;
>
>     QEMUTimer *post_load_timer;
> };
>diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
>index 84f4e727c7..e10319785d 100644
>--- a/include/hw/virtio/vhost-vsock.h
>+++ b/include/hw/virtio/vhost-vsock.h
>@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> typedef struct {
>     uint64_t guest_cid;
>     char *vhostfd;
>+    bool enable_dgram;

Leftover?

> } VHostVSockConf;
>
> struct VHostVSock {
>@@ -33,4 +34,7 @@ struct VHostVSock {
>     /*< public >*/
> };
>
>+#define MAX_VQS_WITHOUT_DGRAM 2
>+#define MAX_VQS_WITH_DGRAM 4
>+
> #endif /* QEMU_VHOST_VSOCK_H */
>diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
>index 5eac522ee2..6ff8c5084c 100644
>--- a/include/standard-headers/linux/virtio_vsock.h
>+++ b/include/standard-headers/linux/virtio_vsock.h
>@@ -41,6 +41,9 @@
> /* The feature bitmap for virtio vsock */
> #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET supported */
>
>+/* Feature bits */
>+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */

Bit 0 is reserved for STREAM; IIRC, in the virtio-spec proposal you also
used bit 2 for DGRAM.
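
I.e., presumably the define should end up as something like (assuming
bit 2, per the spec proposal):

    /* The feature bitmap for virtio vsock */
    #define VIRTIO_VSOCK_F_SEQPACKET  1  /* SOCK_SEQPACKET supported */
    #define VIRTIO_VSOCK_F_DGRAM      2  /* SOCK_DGRAM supported */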

Thanks,
Stefano


Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
Posted by Jiang Wang . 2 years, 10 months ago
On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >Datagram sockets are connectionless and unreliable.
> >The sender does not know the capacity of the receiver
> >and may send more packets than the receiver can handle.
> >
> >Add two more dedicate virtqueues for datagram sockets,
> >so that it will not unfairly steal resources from
> >stream and future connection-oriented sockets.
> >---
> >v1 -> v2: use qemu cmd option to control number of queues,
> >        removed configuration settings for dgram.
> >v2 -> v3: use ioctl to get features and decie numbr of
> >       virt queues, instead of qemu cmd option.
> >
> >btw: this patch is based on Arseny's SEQPACKET patch.
> >
> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> > hw/virtio/vhost-vsock.c                       |  3 ++
> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
> > include/hw/virtio/vhost-vsock.h               |  4 ++
> > include/standard-headers/linux/virtio_vsock.h |  3 ++
> > 5 files changed, 63 insertions(+), 3 deletions(-)
> >
> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >index 4ad6e234ad..8164e09445 100644
> >--- a/hw/virtio/vhost-vsock-common.c
> >+++ b/hw/virtio/vhost-vsock-common.c
> >@@ -17,6 +17,8 @@
> > #include "hw/virtio/vhost-vsock.h"
> > #include "qemu/iov.h"
> > #include "monitor/monitor.h"
> >+#include <sys/ioctl.h>
> >+#include <linux/vhost.h>
> >
> > int vhost_vsock_common_start(VirtIODevice *vdev)
> > {
> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >     return 0;
> > }
> >
> >+static int vhost_vsock_get_max_qps(void)
> >+{
> >+    uint64_t features;
> >+    int ret;
> >+    int fd = -1;
> >+
> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >+    if (fd == -1) {
> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >+        return -1;
> >+    }
>
> You should use the `vhostfd` already opened in
> vhost_vsock_device_realize(), since QEMU may not have permission to
> access to the device, and the file descriptor can be passed from the
> management layer.
>
Sure. Will do.

> >+
> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >+    if (ret) {
> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >+        qemu_close(fd);
> >+        return ret;
> >+    }
> >+
> >+    qemu_close(fd);
> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >+        return MAX_VQS_WITH_DGRAM;
> >+
> >+    return MAX_VQS_WITHOUT_DGRAM;
> >+}
> >+
> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> > {
> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >
> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >                 sizeof(struct virtio_vsock_config));
> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                        vhost_vsock_common_handle_output);
> >
> >+    nvqs = vhost_vsock_get_max_qps();
>
> You can't do this here, since the vhost-vsock-common.c functions are
> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
> device since the device is emulated in a separate user process.
>
> I think you can use something similar to what you did in v2, where you
> passed a parameter to vhost_vsock_common_realize() to enable or not the
> datagram queues.
>
Just to make sure: the QEMU parameter will only be used for vhost-user-vsock,
right? I think for the vhost-vsock kernel module, we will use the ioctl and
ignore the QEMU parameter?

> >+
> >+    if (nvqs < 0)
> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >+
> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+                                              vhost_vsock_common_handle_output);
> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+                                               vhost_vsock_common_handle_output);
> >+    }
> >+
> >     /* The event queue belongs to QEMU */
> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                        vhost_vsock_common_handle_output);
>
> Did you do a test with a guest that doesn't support datagram with QEMU
> and hosts that do?
>
Yes, and it works.

> I repost my thoughts that I had on v2:
>
>      What happen if the guest doesn't support dgram?
>
>      I think we should dynamically use the 3rd queue or the 5th queue for
>      the events at runtime after the guest acked the features.
>
>      Maybe better to switch to an array of VirtQueue.
>
I think the current v3 already uses the 3rd or 5th queue dynamically,
depending on the feature bit.

> >
> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> >+    vvc->vhost_dev.nvqs = nvqs;
> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
> >
> >     vvc->post_load_timer = NULL;
> > }
> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >
> >     virtio_delete_queue(vvc->recv_vq);
> >     virtio_delete_queue(vvc->trans_vq);
> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
> >+        virtio_delete_queue(vvc->dgram_recv_vq);
> >+        virtio_delete_queue(vvc->dgram_trans_vq);
> >+    }
> >+
> >+    if (vvc->vhost_dev.vqs)
>
> g_free() already handles NULL pointers, so you can remove this check.
>
Got it.

> >+        g_free(vvc->vhost_dev.vqs);
> >+
> >     virtio_delete_queue(vvc->event_vq);
> >     virtio_cleanup(vdev);
> > }
> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> >index 1b1a5c70ed..33bbe16983 100644
> >--- a/hw/virtio/vhost-vsock.c
> >+++ b/hw/virtio/vhost-vsock.c
> >@@ -23,6 +23,7 @@
> >
> > const int feature_bits[] = {
> >     VIRTIO_VSOCK_F_SEQPACKET,
> >+    VIRTIO_VSOCK_F_DGRAM,
> >     VHOST_INVALID_FEATURE_BIT
> > };
> >
> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >
> >     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >                                 requested_features);
> > }
> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> >index e412b5ee98..798715241f 100644
> >--- a/include/hw/virtio/vhost-vsock-common.h
> >+++ b/include/hw/virtio/vhost-vsock-common.h
> >@@ -27,12 +27,13 @@ enum {
> > struct VHostVSockCommon {
> >     VirtIODevice parent;
> >
> >-    struct vhost_virtqueue vhost_vqs[2];
> >     struct vhost_dev vhost_dev;
> >
> >     VirtQueue *event_vq;
> >     VirtQueue *recv_vq;
> >     VirtQueue *trans_vq;
> >+    VirtQueue *dgram_recv_vq;
> >+    VirtQueue *dgram_trans_vq;
> >
> >     QEMUTimer *post_load_timer;
> > };
> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> >index 84f4e727c7..e10319785d 100644
> >--- a/include/hw/virtio/vhost-vsock.h
> >+++ b/include/hw/virtio/vhost-vsock.h
> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> > typedef struct {
> >     uint64_t guest_cid;
> >     char *vhostfd;
> >+    bool enable_dgram;
>
> Leftover?
>
Right, but to support vhost-user-vsock, I think I will add this parameter back?

> > } VHostVSockConf;
> >
> > struct VHostVSock {
> >@@ -33,4 +34,7 @@ struct VHostVSock {
> >     /*< public >*/
> > };
> >
> >+#define MAX_VQS_WITHOUT_DGRAM 2
> >+#define MAX_VQS_WITH_DGRAM 4
> >+
> > #endif /* QEMU_VHOST_VSOCK_H */
> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
> >index 5eac522ee2..6ff8c5084c 100644
> >--- a/include/standard-headers/linux/virtio_vsock.h
> >+++ b/include/standard-headers/linux/virtio_vsock.h
> >@@ -41,6 +41,9 @@
> > /* The feature bitmap for virtio vsock */
> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET supported */
> >
> >+/* Feature bits */
> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
>
> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
> used bit 2 for DGRAM.
>
My bad, I did not update the code here yet.

> Thanks,
> Stefano
>

Re: [RFC v3] virtio/vsock: add two more queues for datagram types
Posted by Stefano Garzarella 2 years, 10 months ago
On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
>On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>>
>> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
>> >Datagram sockets are connectionless and unreliable.
>> >The sender does not know the capacity of the receiver
>> >and may send more packets than the receiver can handle.
>> >
>> >Add two more dedicate virtqueues for datagram sockets,
>> >so that it will not unfairly steal resources from
>> >stream and future connection-oriented sockets.
>> >---
>> >v1 -> v2: use qemu cmd option to control number of queues,
>> >        removed configuration settings for dgram.
>> >v2 -> v3: use ioctl to get features and decie numbr of
>> >       virt queues, instead of qemu cmd option.
>> >
>> >btw: this patch is based on Arseny's SEQPACKET patch.
>> >
>> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
>> > hw/virtio/vhost-vsock.c                       |  3 ++
>> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
>> > include/hw/virtio/vhost-vsock.h               |  4 ++
>> > include/standard-headers/linux/virtio_vsock.h |  3 ++
>> > 5 files changed, 63 insertions(+), 3 deletions(-)
>> >
>> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
>> >index 4ad6e234ad..8164e09445 100644
>> >--- a/hw/virtio/vhost-vsock-common.c
>> >+++ b/hw/virtio/vhost-vsock-common.c
>> >@@ -17,6 +17,8 @@
>> > #include "hw/virtio/vhost-vsock.h"
>> > #include "qemu/iov.h"
>> > #include "monitor/monitor.h"
>> >+#include <sys/ioctl.h>
>> >+#include <linux/vhost.h>
>> >
>> > int vhost_vsock_common_start(VirtIODevice *vdev)
>> > {
>> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
>> >     return 0;
>> > }
>> >
>> >+static int vhost_vsock_get_max_qps(void)
>> >+{
>> >+    uint64_t features;
>> >+    int ret;
>> >+    int fd = -1;
>> >+
>> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
>> >+    if (fd == -1) {
>> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
>> >+        return -1;
>> >+    }
>>
>> You should use the `vhostfd` already opened in
>> vhost_vsock_device_realize(), since QEMU may not have permission to
>> access to the device, and the file descriptor can be passed from the
>> management layer.
>>
>Sure. Will do.
>
>> >+
>> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
>> >+    if (ret) {
>> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
>> >+        qemu_close(fd);
>> >+        return ret;
>> >+    }
>> >+
>> >+    qemu_close(fd);
>> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
>> >+        return MAX_VQS_WITH_DGRAM;
>> >+
>> >+    return MAX_VQS_WITHOUT_DGRAM;
>> >+}
>> >+
>> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
>> > {
>> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
>> >
>> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
>> >                 sizeof(struct virtio_vsock_config));
>> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice 
>> >*vdev, const char *name)
>> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >                                        vhost_vsock_common_handle_output);
>> >
>> >+    nvqs = vhost_vsock_get_max_qps();
>>
>> You can't do this here, since the vhost-vsock-common.c functions are
>> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
>> device since the device is emulated in a separate user process.
>>
>> I think you can use something similar to what you did in v2, where you
>> passed a parameter to vhost_vsock_common_realize() to enable or not the
>> datagram queues.
>>
>Just to make sure, the qemu parameter will only be used for vhost-user-vsock,
>right? I think for the vhost-vsock kernel module, we will use ioctl and ignore
>the qemu parameter?

No, I mean a function parameter of vhost_vsock_common_realize() that we
set to true when datagram is supported by the backend.

You can move the vhost_vsock_get_max_qps() call into
vhost_vsock_device_realize(), just before calling
vhost_vsock_common_realize(), where we can pass a parameter to specify
whether datagram is supported or not.

For now, vhost-user-vsock can always pass `false`. When we support it, we
can add something similar to discover the features.

Just to be clear, we don't need any QEMU command line parameter.
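
A minimal sketch of that flow (assuming vhost_vsock_get_max_qps() is
reworked to take the fd and vhost_vsock_common_realize() grows a flag):

    /* In vhost_vsock_device_realize(), once vhostfd is available: */
    int nvqs = vhost_vsock_get_max_qps(vhostfd);
    bool enable_dgram = (nvqs == MAX_VQS_WITH_DGRAM);

    vhost_vsock_common_realize(vdev, "vhost-vsock", enable_dgram);

    /* In vhost_user_vsock_device_realize(), until datagram is supported: */
    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);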

>
>> >+
>> >+    if (nvqs < 0)
>> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>> >+
>> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >+                                              vhost_vsock_common_handle_output);
>> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >+                                               vhost_vsock_common_handle_output);
>> >+    }
>> >+
>> >     /* The event queue belongs to QEMU */
>> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >                                        vhost_vsock_common_handle_output);
>>
>> Did you do a test with a guest that doesn't support datagram with QEMU
>> and hosts that do?
>>
>Yes, and it works.
>
>> I repost my thoughts that I had on v2:
>>
>>      What happen if the guest doesn't support dgram?
>>
>>      I think we should dynamically use the 3rd queue or the 5th queue for
>>      the events at runtime after the guest acked the features.
>>
>>      Maybe better to switch to an array of VirtQueue.
>>
>I think in current V3, it  already dynamically use 3rd or 5th queue 
>depending
>on the feature bit.

I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't 
know the features acked by the guest, so how can it be dynamic?

Here we only know whether the host kernel supports it.

Maybe it works, because in QEMU we use the event queue only after a 
migration to send a reset event, so you can try to migrate a guest to 
check this path.

I'll be off until July 16th, after that I'll check better, but I think 
there is something wrong here and we should use the 3rd or 5th queue for 
events only after the guest acked the features.
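
For example, the event queue slot could be picked only once the guest
features are known, roughly like this (where exactly to hook it is still an
open question; the helper name is an assumption):

    /* Hypothetical helper, usable once the guest has acked its features
     * (e.g. on the start path), to pick the event queue index. */
    static int vhost_vsock_common_event_vq_index(VirtIODevice *vdev)
    {
        /* 5th queue (index 4) with DGRAM, 3rd queue (index 2) without. */
        return virtio_vdev_has_feature(vdev, VIRTIO_VSOCK_F_DGRAM) ? 4 : 2;
    }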

>
>> >
>> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
>> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
>> >+    vvc->vhost_dev.nvqs = nvqs;
>> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
>> >
>> >     vvc->post_load_timer = NULL;
>> > }
>> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
>> >
>> >     virtio_delete_queue(vvc->recv_vq);
>> >     virtio_delete_queue(vvc->trans_vq);
>> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
>> >+        virtio_delete_queue(vvc->dgram_recv_vq);
>> >+        virtio_delete_queue(vvc->dgram_trans_vq);
>> >+    }
>> >+
>> >+    if (vvc->vhost_dev.vqs)
>>
>> g_free() already handles NULL pointers, so you can remove this check.
>>
>Got it.
>
>> >+        g_free(vvc->vhost_dev.vqs);
>> >+
>> >     virtio_delete_queue(vvc->event_vq);
>> >     virtio_cleanup(vdev);
>> > }
>> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
>> >index 1b1a5c70ed..33bbe16983 100644
>> >--- a/hw/virtio/vhost-vsock.c
>> >+++ b/hw/virtio/vhost-vsock.c
>> >@@ -23,6 +23,7 @@
>> >
>> > const int feature_bits[] = {
>> >     VIRTIO_VSOCK_F_SEQPACKET,
>> >+    VIRTIO_VSOCK_F_DGRAM,
>> >     VHOST_INVALID_FEATURE_BIT
>> > };
>> >
>> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
>> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>> >
>> >     virtio_add_feature(&requested_features, 
>> >     VIRTIO_VSOCK_F_SEQPACKET);
>> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
>> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
>> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
>> >                                 requested_features);
>> > }
>> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
>> >index e412b5ee98..798715241f 100644
>> >--- a/include/hw/virtio/vhost-vsock-common.h
>> >+++ b/include/hw/virtio/vhost-vsock-common.h
>> >@@ -27,12 +27,13 @@ enum {
>> > struct VHostVSockCommon {
>> >     VirtIODevice parent;
>> >
>> >-    struct vhost_virtqueue vhost_vqs[2];
>> >     struct vhost_dev vhost_dev;
>> >
>> >     VirtQueue *event_vq;
>> >     VirtQueue *recv_vq;
>> >     VirtQueue *trans_vq;
>> >+    VirtQueue *dgram_recv_vq;
>> >+    VirtQueue *dgram_trans_vq;
>> >
>> >     QEMUTimer *post_load_timer;
>> > };
>> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
>> >index 84f4e727c7..e10319785d 100644
>> >--- a/include/hw/virtio/vhost-vsock.h
>> >+++ b/include/hw/virtio/vhost-vsock.h
>> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
>> > typedef struct {
>> >     uint64_t guest_cid;
>> >     char *vhostfd;
>> >+    bool enable_dgram;
>>
>> Leftover?
>>
>Right, but to support vhost-vsock-user, I think I will add this 
>parameter back?

I'm not sure it is needed.

>
>> > } VHostVSockConf;
>> >
>> > struct VHostVSock {
>> >@@ -33,4 +34,7 @@ struct VHostVSock {
>> >     /*< public >*/
>> > };
>> >
>> >+#define MAX_VQS_WITHOUT_DGRAM 2
>> >+#define MAX_VQS_WITH_DGRAM 4
>> >+
>> > #endif /* QEMU_VHOST_VSOCK_H */
>> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
>> >index 5eac522ee2..6ff8c5084c 100644
>> >--- a/include/standard-headers/linux/virtio_vsock.h
>> >+++ b/include/standard-headers/linux/virtio_vsock.h
>> >@@ -41,6 +41,9 @@
>> > /* The feature bitmap for virtio vsock */
>> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET 
>> > supported */
>> >
>> >+/* Feature bits */
>> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
>>
>> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
>> used bit 2 for DGRAM.
>>
>My bad, I did not update the code here yet.
>

No problems :-)

Thanks,
Stefano


Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
Posted by Jiang Wang . 2 years, 10 months ago
On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >>
> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >> >Datagram sockets are connectionless and unreliable.
> >> >The sender does not know the capacity of the receiver
> >> >and may send more packets than the receiver can handle.
> >> >
> >> >Add two more dedicate virtqueues for datagram sockets,
> >> >so that it will not unfairly steal resources from
> >> >stream and future connection-oriented sockets.
> >> >---
> >> >v1 -> v2: use qemu cmd option to control number of queues,
> >> >        removed configuration settings for dgram.
> >> >v2 -> v3: use ioctl to get features and decie numbr of
> >> >       virt queues, instead of qemu cmd option.
> >> >
> >> >btw: this patch is based on Arseny's SEQPACKET patch.
> >> >
> >> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> >> > hw/virtio/vhost-vsock.c                       |  3 ++
> >> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
> >> > include/hw/virtio/vhost-vsock.h               |  4 ++
> >> > include/standard-headers/linux/virtio_vsock.h |  3 ++
> >> > 5 files changed, 63 insertions(+), 3 deletions(-)
> >> >
> >> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >> >index 4ad6e234ad..8164e09445 100644
> >> >--- a/hw/virtio/vhost-vsock-common.c
> >> >+++ b/hw/virtio/vhost-vsock-common.c
> >> >@@ -17,6 +17,8 @@
> >> > #include "hw/virtio/vhost-vsock.h"
> >> > #include "qemu/iov.h"
> >> > #include "monitor/monitor.h"
> >> >+#include <sys/ioctl.h>
> >> >+#include <linux/vhost.h>
> >> >
> >> > int vhost_vsock_common_start(VirtIODevice *vdev)
> >> > {
> >> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >> >     return 0;
> >> > }
> >> >
> >> >+static int vhost_vsock_get_max_qps(void)
> >> >+{
> >> >+    uint64_t features;
> >> >+    int ret;
> >> >+    int fd = -1;
> >> >+
> >> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >> >+    if (fd == -1) {
> >> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >> >+        return -1;
> >> >+    }
> >>
> >> You should use the `vhostfd` already opened in
> >> vhost_vsock_device_realize(), since QEMU may not have permission to
> >> access to the device, and the file descriptor can be passed from the
> >> management layer.
> >>
> >Sure. Will do.
> >
> >> >+
> >> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >> >+    if (ret) {
> >> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >> >+        qemu_close(fd);
> >> >+        return ret;
> >> >+    }
> >> >+
> >> >+    qemu_close(fd);
> >> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >> >+        return MAX_VQS_WITH_DGRAM;
> >> >+
> >> >+    return MAX_VQS_WITHOUT_DGRAM;
> >> >+}
> >> >+
> >> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >> > {
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >
> >> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >> >                 sizeof(struct virtio_vsock_config));
> >> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice
> >> >*vdev, const char *name)
> >> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >> >
> >> >+    nvqs = vhost_vsock_get_max_qps();
> >>
> >> You can't do this here, since the vhost-vsock-common.c functions are
> >> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
> >> device since the device is emulated in a separate user process.
> >>
> >> I think you can use something similar to what you did in v2, where you
> >> passed a parameter to vhost_vsock_common_realize() to enable or not the
> >> datagram queues.
> >>
> >Just to make sure, the qemu parameter will only be used for vhost-user-vsock,
> >right? I think for the vhost-vsock kernel module, we will use ioctl and ignore
> >the qemu parameter?
>
> No, I mean a function parameter in vhost_vsock_common_realize() that we
> set to true when datagram is supported by the backend.
>
> You can move the vhost_vsock_get_max_qps() call in
> vhost_vsock_device_realize(), just before call
> vhost_vsock_common_realize() where we can pass a parameter to specify if
> datagram is supported or not.
>
> For now in vhost-user-vsock you can always pass `false`. When we will
> support it, we can add something similar to discover the features.
>
> Just to be clear, we don't need any QEMU command line parameter.
>
Got it. That sounds good.

> >
> >> >+
> >> >+    if (nvqs < 0)
> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >+
> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                              vhost_vsock_common_handle_output);
> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                               vhost_vsock_common_handle_output);
> >> >+    }
> >> >+
> >> >     /* The event queue belongs to QEMU */
> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >>
> >> Did you do a test with a guest that doesn't support datagram with QEMU
> >> and hosts that do?
> >>
> >Yes, and it works.
> >
> >> I repost my thoughts that I had on v2:
> >>
> >>      What happen if the guest doesn't support dgram?
> >>
> >>      I think we should dynamically use the 3rd queue or the 5th queue for
> >>      the events at runtime after the guest acked the features.
> >>
> >>      Maybe better to switch to an array of VirtQueue.
> >>
> >I think in current V3, it  already dynamically use 3rd or 5th queue
> >depending
> >on the feature bit.
>
> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
> know the features acked by the guest, so how can it be dynamic?
>
> Here we should know only if the host kernel supports it.
>
> Maybe it works, because in QEMU we use the event queue only after a
> migration to send a reset event, so you can try to migrate a guest to
> check this path.
>
I see. I did not test migration, so likely I did not check that code path.
Will give it a try.

> I'll be off until July 16th, after that I'll check better, but I think
> there is something wrong here and we should use the 3rd or 5th queue for
> events only after the guest acked the features.
>
Got it.

> >
> >> >
> >> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> >> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> >> >+    vvc->vhost_dev.nvqs = nvqs;
> >> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
> >> >
> >> >     vvc->post_load_timer = NULL;
> >> > }
> >> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >> >
> >> >     virtio_delete_queue(vvc->recv_vq);
> >> >     virtio_delete_queue(vvc->trans_vq);
> >> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        virtio_delete_queue(vvc->dgram_recv_vq);
> >> >+        virtio_delete_queue(vvc->dgram_trans_vq);
> >> >+    }
> >> >+
> >> >+    if (vvc->vhost_dev.vqs)
> >>
> >> g_free() already handles NULL pointers, so you can remove this check.
> >>
> >Got it.
> >
> >> >+        g_free(vvc->vhost_dev.vqs);
> >> >+
> >> >     virtio_delete_queue(vvc->event_vq);
> >> >     virtio_cleanup(vdev);
> >> > }
> >> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> >> >index 1b1a5c70ed..33bbe16983 100644
> >> >--- a/hw/virtio/vhost-vsock.c
> >> >+++ b/hw/virtio/vhost-vsock.c
> >> >@@ -23,6 +23,7 @@
> >> >
> >> > const int feature_bits[] = {
> >> >     VIRTIO_VSOCK_F_SEQPACKET,
> >> >+    VIRTIO_VSOCK_F_DGRAM,
> >> >     VHOST_INVALID_FEATURE_BIT
> >> > };
> >> >
> >> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >
> >> >     virtio_add_feature(&requested_features,
> >> >     VIRTIO_VSOCK_F_SEQPACKET);
> >> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
> >> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> >> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >> >                                 requested_features);
> >> > }
> >> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> >> >index e412b5ee98..798715241f 100644
> >> >--- a/include/hw/virtio/vhost-vsock-common.h
> >> >+++ b/include/hw/virtio/vhost-vsock-common.h
> >> >@@ -27,12 +27,13 @@ enum {
> >> > struct VHostVSockCommon {
> >> >     VirtIODevice parent;
> >> >
> >> >-    struct vhost_virtqueue vhost_vqs[2];
> >> >     struct vhost_dev vhost_dev;
> >> >
> >> >     VirtQueue *event_vq;
> >> >     VirtQueue *recv_vq;
> >> >     VirtQueue *trans_vq;
> >> >+    VirtQueue *dgram_recv_vq;
> >> >+    VirtQueue *dgram_trans_vq;
> >> >
> >> >     QEMUTimer *post_load_timer;
> >> > };
> >> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> >> >index 84f4e727c7..e10319785d 100644
> >> >--- a/include/hw/virtio/vhost-vsock.h
> >> >+++ b/include/hw/virtio/vhost-vsock.h
> >> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> >> > typedef struct {
> >> >     uint64_t guest_cid;
> >> >     char *vhostfd;
> >> >+    bool enable_dgram;
> >>
> >> Leftover?
> >>
> >Right, but to support vhost-vsock-user, I think I will add this
> >parameter back?
>
> I'm not sure it is needed.
>
> >
> >> > } VHostVSockConf;
> >> >
> >> > struct VHostVSock {
> >> >@@ -33,4 +34,7 @@ struct VHostVSock {
> >> >     /*< public >*/
> >> > };
> >> >
> >> >+#define MAX_VQS_WITHOUT_DGRAM 2
> >> >+#define MAX_VQS_WITH_DGRAM 4
> >> >+
> >> > #endif /* QEMU_VHOST_VSOCK_H */
> >> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
> >> >index 5eac522ee2..6ff8c5084c 100644
> >> >--- a/include/standard-headers/linux/virtio_vsock.h
> >> >+++ b/include/standard-headers/linux/virtio_vsock.h
> >> >@@ -41,6 +41,9 @@
> >> > /* The feature bitmap for virtio vsock */
> >> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET
> >> > supported */
> >> >
> >> >+/* Feature bits */
> >> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
> >>
> >> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
> >> used bit 2 for DGRAM.
> >>
> >My bad, I did not update the code here yet.
> >
>
> No problems :-)
>
> Thanks,
> Stefano
>