[libvirt] [PATCH v4 11/12] qemu: Allow showing the dump progress for memory only dump

Posted by John Ferlan
https://bugzilla.redhat.com/show_bug.cgi?id=916061

If the QEMU version running is new enough (based on the DUMP_COMPLETED
event), then we can add a 'detach' boolean to the dump-guest-memory
command in order to tell QEMU to run the dump in a thread. This ensures
that we don't lock out other commands while the potentially
long-running memory dump completes.

This allows using qemuDumpWaitForCompletion to wait for the event,
while qemuDomainGetJobInfoDumpStats can be used via
qemuDomainGetJobInfo to query QEMU and determine how far along the
job is.
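
As an aside for reviewers, here is a rough client-side sketch of what
this enables (not part of the patch): while the blocking dump call runs
in one thread or process, a second connection can poll
virDomainGetJobInfo() for progress. The domain name, dump path, and the
assumption that the dump stats are reported via memProcessed/memTotal
are illustrative only.

/* Rough sketch: watch a memory-only dump from a second thread or
 * connection.  "demo-guest" and the dump path are placeholders, and
 * the memProcessed/memTotal mapping is an assumption about how the
 * dump stats end up in the job info. */
#include <stdio.h>
#include <unistd.h>
#include <libvirt/libvirt.h>

int
main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo-guest") : NULL;
    virDomainJobInfo info;

    if (!dom) {
        if (conn)
            virConnectClose(conn);
        return 1;
    }

    /* Meanwhile, another thread or process runs something like:
     *   virDomainCoreDumpWithFormat(dom, "/var/tmp/demo.dump",
     *                               VIR_DOMAIN_CORE_DUMP_FORMAT_RAW,
     *                               VIR_DUMP_MEMORY_ONLY);
     * which blocks until the dump completes. */
    while (virDomainGetJobInfo(dom, &info) == 0 &&
           info.type != VIR_DOMAIN_JOB_NONE) {
        if (info.memTotal > 0)
            printf("memory dump: %llu of %llu bytes written\n",
                   info.memProcessed, info.memTotal);
        sleep(1);
    }

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}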

Now that we have a true async job, we only set the dump_memory_only
flag when @detach=false; otherwise, we mark the job's stats type as a
memory dump, which allows the job info code on the other end to
determine which stats to copy.

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/qemu/qemu_driver.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 60 insertions(+), 5 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b807486de..7ed7986fe 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3760,6 +3760,49 @@ qemuDomainManagedSaveRemove(virDomainPtr dom, unsigned int flags)
 }
 
 
+/**
+ * qemuDumpWaitForCompletion:
+ * @vm: domain object
+ *
+ * If the query dump capability exists, then it's possible to start a
+ * guest memory dump operation using a thread via a 'detach' qualifier
+ * to the dump guest memory command. This allows us to asynchronously
+ * check whether the dump has completed.
+ *
+ * Returns 0 on success, -1 on failure
+ */
+static int
+qemuDumpWaitForCompletion(virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    int ret = -1;
+
+    VIR_DEBUG("Waiting for dump completion");
+    while (!priv->job.dumpCompleted && !priv->job.abortJob) {
+        if (virDomainObjWait(vm) < 0)
+            return -1;
+    }
+
+    if (priv->job.current->s.dumpStats.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
+        if (priv->job.error)
+            virReportError(VIR_ERR_OPERATION_FAILED,
+                           _("memory-only dump failed: %s"),
+                           priv->job.error);
+        else
+            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+                           _("memory-only dump failed for unknown reason"));
+
+        goto cleanup;
+    }
+    qemuDomainJobInfoUpdateTime(priv->job.current);
+
+    ret = 0;
+
+ cleanup:
+    return ret;
+}
+
+
 static int
 qemuDumpToFd(virQEMUDriverPtr driver,
              virDomainObjPtr vm,
@@ -3768,6 +3811,7 @@ qemuDumpToFd(virQEMUDriverPtr driver,
              const char *dumpformat)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    bool detach = false;
     int ret = -1;
 
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DUMP_GUEST_MEMORY)) {
@@ -3776,11 +3820,17 @@ qemuDumpToFd(virQEMUDriverPtr driver,
         return -1;
     }
 
+    detach = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DUMP_COMPLETED);
+
     if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
         return -1;
 
-    VIR_FREE(priv->job.current);
-    priv->job.dump_memory_only = true;
+    if (detach) {
+        priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
+    } else {
+        VIR_FREE(priv->job.current);
+        priv->job.dump_memory_only = true;
+    }
 
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
         return -1;
@@ -3794,15 +3844,20 @@ qemuDumpToFd(virQEMUDriverPtr driver,
                              "for this QEMU binary"),
                            dumpformat);
             ret = -1;
+            ignore_value(qemuDomainObjExitMonitor(driver, vm));
             goto cleanup;
         }
     }
 
-    ret = qemuMonitorDumpToFd(priv->mon, fd, dumpformat, false);
+    ret = qemuMonitorDumpToFd(priv->mon, fd, dumpformat, detach);
 
- cleanup:
-    ignore_value(qemuDomainObjExitMonitor(driver, vm));
+    if ((qemuDomainObjExitMonitor(driver, vm) < 0) || ret < 0)
+        goto cleanup;
 
+    if (detach)
+        ret = qemuDumpWaitForCompletion(vm);
+
+ cleanup:
     return ret;
 }
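
For reference, the wait loop in qemuDumpWaitForCompletion() above
relies on virDomainObjWait() blocking on the per-domain condition,
which (as I understand the earlier patches in this series) the
DUMP_COMPLETED handling broadcasts after setting
priv->job.dumpCompleted; in other words, the usual condition-variable
pattern. A minimal stand-alone sketch of that pattern in plain pthreads
(illustrative names only, not libvirt's wrappers):

/* Stand-alone sketch of the wait pattern (plain pthreads, illustrative
 * names; libvirt uses virDomainObjWait()/virDomainObjBroadcast() on
 * the per-domain condition instead). */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool dump_completed;   /* set when the dump finishes */
static bool abort_requested;  /* set when the job is aborted */

/* Called from the event-handling thread once the dump is done. */
void
dump_completed_handler(void)
{
    pthread_mutex_lock(&lock);
    dump_completed = true;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
}

/* Called from the thread that issued the detached dump. */
int
wait_for_dump(void)
{
    int ret;

    pthread_mutex_lock(&lock);
    /* Re-check both predicates after every wakeup: condition variables
     * can wake spuriously, and a broadcast only means "look again". */
    while (!dump_completed && !abort_requested)
        pthread_cond_wait(&cond, &lock);
    ret = abort_requested ? -1 : 0;
    pthread_mutex_unlock(&lock);
    return ret;
}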
 
-- 
2.13.6

Re: [libvirt] [PATCH v4 11/12] qemu: Allow showing the dump progress for memory only dump
Posted by Jiri Denemark
On Thu, Feb 01, 2018 at 18:24:42 -0500, John Ferlan wrote:
> https://bugzilla.redhat.com/show_bug.cgi?id=916061
> 
> If the QEMU version running is new enough (based on the DUMP_COMPLETED
> event), then we can add a 'detach' boolean to the dump-guest-memory
> command in order to tell QEMU to run the dump in a thread. This
> ensures that we don't lock out other commands while the potentially
> long-running memory dump completes.
> 
> This allows using qemuDumpWaitForCompletion to wait for the event,
> while qemuDomainGetJobInfoDumpStats can be used via
> qemuDomainGetJobInfo to query QEMU and determine how far along the
> job is.
> 
> Now that we have a true async job, we only set the dump_memory_only
> flag when @detach=false; otherwise, we mark the job's stats type as a
> memory dump, which allows the job info code on the other end to
> determine which stats to copy.
> 
> Signed-off-by: John Ferlan <jferlan@redhat.com>
> ---
>  src/qemu/qemu_driver.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 60 insertions(+), 5 deletions(-)

Looks OK.

Jirka
