[libvirt] [PATCH 03/11] qemu: Implement the ability to return IOThread stats

John Ferlan posted 11 patches 6 years, 7 months ago
There is a newer version of this series
[libvirt] [PATCH 03/11] qemu: Implement the ability to return IOThread stats
Posted by John Ferlan 6 years, 7 months ago
Process the IOThreads polling stats if available. Generate the
output params record to be returned to the caller with the three
values - poll-max-ns, poll-grow, and poll-shrink.
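
For illustration (the values below are made up), a domain with one
IOThread that has iothread_id 1 and reports polling data would yield a
record along these lines:

    iothread.count=1
    iothread.1.poll-max-ns=32768
    iothread.1.poll-grow=0
    iothread.1.poll-shrink=0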

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 include/libvirt/libvirt-domain.h |  1 +
 src/libvirt-domain.c             | 38 ++++++++++++++++
 src/qemu/qemu_driver.c           | 78 ++++++++++++++++++++++++++++++++
 3 files changed, 117 insertions(+)

diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index fdd2d6b8ea..58fd4bc10c 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -2048,6 +2048,7 @@ typedef enum {
     VIR_DOMAIN_STATS_INTERFACE = (1 << 4), /* return domain interfaces info */
     VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
     VIR_DOMAIN_STATS_PERF = (1 << 6), /* return domain perf event info */
+    VIR_DOMAIN_STATS_IOTHREAD = (1 << 7), /* return iothread poll info */
 } virDomainStatsTypes;
 
 typedef enum {
diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
index 7690339521..9fda56d660 100644
--- a/src/libvirt-domain.c
+++ b/src/libvirt-domain.c
@@ -11499,6 +11499,44 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
  *                               long long. It is produced by the
  *                               emulation_faults perf event
  *
+ * VIR_DOMAIN_STATS_IOTHREAD:
+ *     Return IOThread statistics if available. IOThread polling is a
+ *     timing mechanism that allows the hypervisor to extend the period
+ *     of time during which the guest can perform operations on the
+ *     CPU being used by the IOThread. The higher the value of poll-max-ns,
+ *     the longer the guest will keep the CPU. This may affect other host
+ *     threads using the CPU. The poll-grow and poll-shrink values provide
+ *     a mechanism for the hypervisor to add or remove polling time
+ *     within the confines of 0 and poll-max-ns. For QEMU, poll-grow is
+ *     used as a multiplier for the polling interval, while poll-shrink is
+ *     used as a divisor. When not provided, QEMU may double the polling
+ *     time until poll-max-ns is reached. When poll-shrink is 0 (zero) QEMU
+ *     may reset the polling interval to 0 until it finds its "sweet spot".
+ *     Setting poll-grow too large may cause frequent fluctuation of the
+ *     time; however, this can be tempered by a high poll-shrink to reduce
+ *     the polling interval. For example, a poll-grow of 3 will triple the
+ *     polling time, which could quickly exceed poll-max-ns; a poll-shrink
+ *     of 10 would cut that polling time back more gradually.
+ *
+ *     The typed parameter keys are in this format:
+ *
+ *     "iothread.cnt" - maximum number of IOThreads in the subsequent list
+ *                      as unsigned int. Each IOThread in the list will
+ *                      will use it's iothread_id value as the <id>. There
+ *                      may be fewer <id> entries than the iothread.cnt
+ *                      value if the polling values are not supported.
+ *     "iothread.<id>.poll-max-ns" - maximum polling time in ns as an unsigned
+ *                                   long long. A 0 (zero) means polling is
+ *                                   disabled.
+ *     "iothread.<id>.poll-grow" - polling time factor as an unsigned int.
+ *                                 A 0 (zero) indicates to allow the underlying
+ *                                 hypervisor to choose how to grow the
+ *                                 polling time.
+ *     "iothread.<id>.poll-shrink" - polling time divisor as an unsigned int.
+ *                                 A 0 (zero) indicates to allow the underlying
+ *                                 hypervisor to choose how to shrink the
+ *                                 polling time.
+ *
  * Note that entire stats groups or individual stat fields may be missing from
  * the output in case they are not supported by the given hypervisor, are not
  * applicable for the current state of the guest domain, or their retrieval
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e0edb43557..ff87865fe6 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -20431,6 +20431,83 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
 
 #undef QEMU_ADD_NAME_PARAM
 
+#define QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams, id, name, value) \
+    do { \
+        char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+        snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+                 "block.%u.%s", id, name); \
+        if (virTypedParamsAddUInt(&(record)->params, \
+                                  &(record)->nparams, \
+                                  maxparams, \
+                                  param_name, \
+                                  value) < 0) \
+            goto cleanup; \
+    } while (0)
+
+#define QEMU_ADD_IOTHREAD_PARAM_ULL(record, maxparams, id, name, value) \
+do { \
+    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+    snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+             "iothread.%u.%s", id, name); \
+    if (virTypedParamsAddULLong(&(record)->params, \
+                                &(record)->nparams, \
+                                maxparams, \
+                                param_name, \
+                                value) < 0) \
+        goto cleanup; \
+} while (0)
+
+static int
+qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
+                           virDomainObjPtr dom,
+                           virDomainStatsRecordPtr record,
+                           int *maxparams,
+                           unsigned int privflags ATTRIBUTE_UNUSED)
+{
+    size_t i;
+    qemuMonitorIOThreadInfoPtr *iothreads = NULL;
+    int niothreads;
+    int ret = -1;
+
+    if ((niothreads = qemuDomainGetIOThreadsMon(driver, dom, &iothreads)) < 0)
+        return -1;
+
+    if (niothreads == 0)
+        return 0;
+
+    QEMU_ADD_COUNT_PARAM(record, maxparams, "iothread", niothreads);
+
+    for (i = 0; i < niothreads; i++) {
+        if (iothreads[i]->poll_valid) {
+            QEMU_ADD_IOTHREAD_PARAM_ULL(record, maxparams,
+                                        iothreads[i]->iothread_id,
+                                        "poll-max-ns",
+                                        iothreads[i]->poll_max_ns);
+            QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams,
+                                       iothreads[i]->iothread_id,
+                                       "poll-grow",
+                                       iothreads[i]->poll_grow);
+            QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams,
+                                       iothreads[i]->iothread_id,
+                                       "poll-shrink",
+                                       iothreads[i]->poll_shrink);
+        }
+    }
+
+    ret = 0;
+
+ cleanup:
+    for (i = 0; i < niothreads; i++)
+        VIR_FREE(iothreads[i]);
+    VIR_FREE(iothreads);
+
+    return ret;
+}
+
+#undef QEMU_ADD_IOTHREAD_PARAM_UI
+
+#undef QEMU_ADD_IOTHREAD_PARAM_ULL
+
 #undef QEMU_ADD_COUNT_PARAM
 
 static int
@@ -20505,6 +20582,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
     { qemuDomainGetStatsInterface, VIR_DOMAIN_STATS_INTERFACE, false },
     { qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
     { qemuDomainGetStatsPerf, VIR_DOMAIN_STATS_PERF, false },
+    { qemuDomainGetStatsIOThread, VIR_DOMAIN_STATS_IOTHREAD, false },
     { NULL, 0, false }
 };
 
-- 
2.17.1

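
For context, here is a minimal client-side sketch (not part of the patch)
showing how the VIR_DOMAIN_STATS_IOTHREAD group and the "iothread.*" keys
documented above could be consumed through virConnectGetAllDomainStats();
the connection URI is illustrative and error handling is kept to a minimum:

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainStatsRecordPtr *records = NULL;
        int nrecords;
        int i, j;

        if (!conn)
            return 1;

        /* Request only the IOThread statistics group added by this patch. */
        nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_IOTHREAD,
                                               &records, 0);
        if (nrecords < 0) {
            virConnectClose(conn);
            return 1;
        }

        for (i = 0; i < nrecords; i++) {
            printf("Domain: %s\n", virDomainGetName(records[i]->dom));
            for (j = 0; j < records[i]->nparams; j++) {
                virTypedParameterPtr p = &records[i]->params[j];

                /* Keys look like "iothread.count" or
                 * "iothread.<id>.poll-max-ns". */
                if (p->type == VIR_TYPED_PARAM_ULLONG)
                    printf("  %s = %llu\n", p->field, p->value.ul);
                else if (p->type == VIR_TYPED_PARAM_UINT)
                    printf("  %s = %u\n", p->field, p->value.ui);
            }
        }

        virDomainStatsRecordListFree(records);
        virConnectClose(conn);
        return 0;
    }
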
Re: [libvirt] [PATCH 03/11] qemu: Implement the ability to return IOThread stats
Posted by Michal Privoznik 6 years, 6 months ago
On 10/07/2018 03:00 PM, John Ferlan wrote:
> Process the IOThreads polling stats if available. Generate the
> output params record to be returned to the caller with the three
> values - poll-max-ns, poll-grow, and poll-shrink.
> 
> Signed-off-by: John Ferlan <jferlan@redhat.com>
> ---
>  include/libvirt/libvirt-domain.h |  1 +
>  src/libvirt-domain.c             | 38 ++++++++++++++++
>  src/qemu/qemu_driver.c           | 78 ++++++++++++++++++++++++++++++++
>  3 files changed, 117 insertions(+)
> 
> diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
> index fdd2d6b8ea..58fd4bc10c 100644
> --- a/include/libvirt/libvirt-domain.h
> +++ b/include/libvirt/libvirt-domain.h
> @@ -2048,6 +2048,7 @@ typedef enum {
>      VIR_DOMAIN_STATS_INTERFACE = (1 << 4), /* return domain interfaces info */
>      VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
>      VIR_DOMAIN_STATS_PERF = (1 << 6), /* return domain perf event info */
> +    VIR_DOMAIN_STATS_IOTHREAD = (1 << 7), /* return iothread poll info */
>  } virDomainStatsTypes;
>  
>  typedef enum {
> diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
> index 7690339521..9fda56d660 100644
> --- a/src/libvirt-domain.c
> +++ b/src/libvirt-domain.c
> @@ -11499,6 +11499,44 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
>   *                               long long. It is produced by the
>   *                               emulation_faults perf event
>   *
> + * VIR_DOMAIN_STATS_IOTHREAD:
> + *     Return IOThread statistics if available. IOThread polling is a
> + *     timing mechanism that allows the hypervisor to extend the period
> + *     of time during which the guest can perform operations on the
> + *     CPU being used by the IOThread. The higher the value of poll-max-ns,
> + *     the longer the guest will keep the CPU. This may affect other host
> + *     threads using the CPU. The poll-grow and poll-shrink values provide
> + *     a mechanism for the hypervisor to add or remove polling time
> + *     within the confines of 0 and poll-max-ns. For QEMU, poll-grow is
> + *     used as a multiplier for the polling interval, while poll-shrink is
> + *     used as a divisor. When not provided, QEMU may double the polling
> + *     time until poll-max-ns is reached. When poll-shrink is 0 (zero) QEMU
> + *     may reset the polling interval to 0 until it finds its "sweet spot".
> + *     Setting poll-grow too large may cause frequent fluctuation of the
> + *     time; however, this can be tempered by a high poll-shrink to reduce
> + *     the polling interval. For example, a poll-grow of 3 will triple the
> + *     polling time, which could quickly exceed poll-max-ns; a poll-shrink
> + *     of 10 would cut that polling time back more gradually.
> + *
> + *     The typed parameter keys are in this format:
> + *
> + *     "iothread.cnt" - maximum number of IOThreads in the subsequent list
> + *                      as unsigned int. Each IOThread in the list will
> + *                      will use it's iothread_id value as the <id>. There
> + *                      may be fewer <id> entries than the iothread.cnt
> + *                      value if the polling values are not supported.
> + *     "iothread.<id>.poll-max-ns" - maximum polling time in ns as an unsigned
> + *                                   long long. A 0 (zero) means polling is
> + *                                   disabled.
> + *     "iothread.<id>.poll-grow" - polling time factor as an unsigned int.
> + *                                 A 0 (zero) indicates to allow the underlying
> + *                                 hypervisor to choose how to grow the
> + *                                 polling time.
> + *     "iothread.<id>.poll-shrink" - polling time divisor as an unsigned int.
> + *                                 A 0 (zero) indicates to allow the underlying
> + *                                 hypervisor to choose how to shrink the
> + *                                 polling time.
> + *
>   * Note that entire stats groups or individual stat fields may be missing from
>   * the output in case they are not supported by the given hypervisor, are not
>   * applicable for the current state of the guest domain, or their retrieval
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index e0edb43557..ff87865fe6 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -20431,6 +20431,83 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
>  
>  #undef QEMU_ADD_NAME_PARAM
>  
> +#define QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams, id, name, value) \
> +    do { \
> +        char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
> +        snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
> +                 "block.%u.%s", id, name); \

s/block/iothread/

> +        if (virTypedParamsAddUInt(&(record)->params, \
> +                                  &(record)->nparams, \
> +                                  maxparams, \
> +                                  param_name, \
> +                                  value) < 0) \
> +            goto cleanup; \
> +    } while (0)
> +
> +#define QEMU_ADD_IOTHREAD_PARAM_ULL(record, maxparams, id, name, value) \
> +do { \
> +    char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
> +    snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
> +             "iothread.%u.%s", id, name); \
> +    if (virTypedParamsAddULLong(&(record)->params, \
> +                                &(record)->nparams, \
> +                                maxparams, \
> +                                param_name, \
> +                                value) < 0) \
> +        goto cleanup; \
> +} while (0)
> +
> +static int
> +qemuDomainGetStatsIOThread(virQEMUDriverPtr driver,
> +                           virDomainObjPtr dom,
> +                           virDomainStatsRecordPtr record,
> +                           int *maxparams,
> +                           unsigned int privflags ATTRIBUTE_UNUSED)
> +{
> +    size_t i;
> +    qemuMonitorIOThreadInfoPtr *iothreads = NULL;
> +    int niothreads;
> +    int ret = -1;

This MUST have the check for a job - it is talking to the monitor and
therefore @dom is unlocked and locked again in the meantime. You can take
a look at qemuDomainGetStatsBlock() to see what the check should look like.

> +
> +    if ((niothreads = qemuDomainGetIOThreadsMon(driver, dom, &iothreads)) < 0)
> +        return -1;

Worse, since you moved acquiring the job here, this will fail miserably
when running all stats for a domain, because the code works like this:

1) it calls qemuDomainGetStatsNeedMonitor() to figure out whether the
monitor is needed and, if it is, a job is started,
2) it iterates over all stats callbacks,
3) this function is reached, which tries to set the job again.
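
For readers unfamiliar with the pattern being referenced: the batch code
signals through @privflags whether it already holds a job, and
qemuDomainGetStatsBlock() only touches the monitor in that case. A rough
sketch of the kind of guard being asked for (assuming the HAVE_JOB()
helper and flag that qemu_driver.c uses for the other stats workers; an
illustration, not the eventual fix):

    /* At the top of qemuDomainGetStatsIOThread(), with privflags no
     * longer marked ATTRIBUTE_UNUSED: only talk to the monitor when the
     * caller already acquired a job and the domain is running. */
    if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom))
        return 0;

The helper that actually enters the monitor would then rely on that
already-acquired job instead of starting its own, and the worker-table
entry would be flipped to true, as noted further down.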

> +
> +    if (niothreads == 0)
> +        return 0;
> +
> +    QEMU_ADD_COUNT_PARAM(record, maxparams, "iothread", niothreads);
> +
> +    for (i = 0; i < niothreads; i++) {
> +        if (iothreads[i]->poll_valid) {
> +            QEMU_ADD_IOTHREAD_PARAM_ULL(record, maxparams,
> +                                        iothreads[i]->iothread_id,
> +                                        "poll-max-ns",
> +                                        iothreads[i]->poll_max_ns);
> +            QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams,
> +                                       iothreads[i]->iothread_id,
> +                                       "poll-grow",
> +                                       iothreads[i]->poll_grow);
> +            QEMU_ADD_IOTHREAD_PARAM_UI(record, maxparams,
> +                                       iothreads[i]->iothread_id,
> +                                       "poll-shrink",
> +                                       iothreads[i]->poll_shrink);
> +        }
> +    }
> +
> +    ret = 0;
> +
> + cleanup:
> +    for (i = 0; i < niothreads; i++)
> +        VIR_FREE(iothreads[i]);
> +    VIR_FREE(iothreads);
> +
> +    return ret;
> +}
> +
> +#undef QEMU_ADD_IOTHREAD_PARAM_UI
> +
> +#undef QEMU_ADD_IOTHREAD_PARAM_ULL
> +
>  #undef QEMU_ADD_COUNT_PARAM
>  
>  static int
> @@ -20505,6 +20582,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
>      { qemuDomainGetStatsInterface, VIR_DOMAIN_STATS_INTERFACE, false },
>      { qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
>      { qemuDomainGetStatsPerf, VIR_DOMAIN_STATS_PERF, false },
> +    { qemuDomainGetStatsIOThread, VIR_DOMAIN_STATS_IOTHREAD, false },

s/false/true/   The function you're calling does require access to the
monitor.

>      { NULL, 0, false }
>  };
>  
> 


Michal
