[libvirt] [v2 RESEND 3/3] qemu: Switch code to use new agent job APIs

Posted by Michal Privoznik 7 years ago
There are two sets of functions here:
1) some functions talk on both the monitor and the agent monitor,
2) some functions talk only on the agent monitor.

For functions from set 1) we need to use
qemuDomainObjBeginJobWithAgent() and for functions from set 2) we
need to use qemuDomainObjBeginAgentJob() only.
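
For illustration, a condensed sketch of the two patterns (not code from
the patch; error handling and the Enter/Exit calls are trimmed):

    /* set 1: talks on both the monitor and the agent monitor, so the
     * normal job and the agent job are grabbed together */
    if (qemuDomainObjBeginJobWithAgent(driver, vm,
                                       QEMU_JOB_MODIFY,
                                       QEMU_AGENT_JOB_MODIFY) < 0)
        goto cleanup;
    ...
    qemuDomainObjEndJobWithAgent(driver, vm);

    /* set 2: talks only on the agent monitor, so an agent job alone is
     * enough and the normal job stays free for other APIs */
    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
        goto cleanup;
    ...
    qemuDomainObjEndAgentJob(vm);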

Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
---
 src/qemu/qemu_driver.c | 91 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 58 insertions(+), 33 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 3abbe41895..cffd4c928a 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1954,6 +1954,7 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
     bool useAgent = false, agentRequested, acpiRequested;
     bool isReboot = false;
     bool agentForced;
+    bool agentJob = false;
     int agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN;
 
     virCheckFlags(VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN |
@@ -1980,9 +1981,14 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
     if (virDomainShutdownFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
+        (useAgent && qemuDomainObjBeginJobWithAgent(driver, vm,
+                                                    QEMU_JOB_MODIFY,
+                                                    QEMU_AGENT_JOB_MODIFY) < 0))
         goto cleanup;
 
+    agentJob = useAgent;
+
     if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
@@ -2026,7 +2032,10 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
     }
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    if (agentJob)
+        qemuDomainObjEndJobWithAgent(driver, vm);
+    else
+        qemuDomainObjEndJob(driver, vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -2049,6 +2058,7 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
     bool useAgent = false, agentRequested, acpiRequested;
     bool isReboot = true;
     bool agentForced;
+    bool agentJob = false;
     int agentFlag = QEMU_AGENT_SHUTDOWN_REBOOT;
 
     virCheckFlags(VIR_DOMAIN_REBOOT_ACPI_POWER_BTN |
@@ -2075,9 +2085,14 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
     if (virDomainRebootEnsureACL(dom->conn, vm->def, flags) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
+        (useAgent && qemuDomainObjBeginJobWithAgent(driver, vm,
+                                                    QEMU_JOB_MODIFY,
+                                                    QEMU_AGENT_JOB_MODIFY) < 0))
         goto cleanup;
 
+    agentJob = useAgent;
+
     agentForced = agentRequested && !acpiRequested;
     if (!qemuDomainAgentAvailable(vm, agentForced)) {
         if (agentForced)
@@ -2115,7 +2130,10 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
     }
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    if (agentJob)
+        qemuDomainObjEndJobWithAgent(driver, vm);
+    else
+        qemuDomainObjEndJob(driver, vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -4949,6 +4967,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
     virDomainDefPtr def;
     virDomainDefPtr persistentDef;
     bool hotpluggable = !!(flags & VIR_DOMAIN_VCPU_HOTPLUGGABLE);
+    bool useAgent = !!(flags & VIR_DOMAIN_VCPU_GUEST);
     int ret = -1;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
@@ -4963,13 +4982,14 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
     if (virDomainSetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
+        (useAgent && qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0))
         goto cleanup;
 
     if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
         goto endjob;
 
-    if (flags & VIR_DOMAIN_VCPU_GUEST)
+    if (useAgent)
         ret = qemuDomainSetVcpusAgent(vm, nvcpus);
     else if (flags & VIR_DOMAIN_VCPU_MAXIMUM)
         ret = qemuDomainSetVcpusMax(driver, def, persistentDef, nvcpus);
@@ -4978,7 +4998,10 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
                                          nvcpus, hotpluggable);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    if (useAgent)
+        qemuDomainObjEndAgentJob(vm);
+    else
+        qemuDomainObjEndJob(driver, vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -5429,7 +5452,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
         goto cleanup;
 
     if (flags & VIR_DOMAIN_VCPU_GUEST) {
-        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
             goto cleanup;
 
         if (!virDomainObjIsActive(vm)) {
@@ -5447,7 +5470,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
         qemuDomainObjExitAgent(vm, agent);
 
  endjob:
-        qemuDomainObjEndJob(driver, vm);
+        qemuDomainObjEndAgentJob(vm);
 
         if (ncpuinfo < 0)
             goto cleanup;
@@ -18954,7 +18977,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom,
     if (virDomainPMSuspendForDurationEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -18985,7 +19008,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom,
     qemuDomainObjExitAgent(vm, agent);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -19070,7 +19093,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain,
     if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -19088,7 +19111,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain,
         VIR_FREE(result);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -19170,7 +19193,7 @@ qemuDomainFSTrim(virDomainPtr dom,
     if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (!qemuDomainAgentAvailable(vm, true))
@@ -19184,7 +19207,7 @@ qemuDomainFSTrim(virDomainPtr dom,
     qemuDomainObjExitAgent(vm, agent);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -19352,7 +19375,7 @@ qemuDomainGetTime(virDomainPtr dom,
     if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -19371,7 +19394,7 @@ qemuDomainGetTime(virDomainPtr dom,
     ret = 0;
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -19402,7 +19425,9 @@ qemuDomainSetTime(virDomainPtr dom,
 
     priv = vm->privateData;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginJobWithAgent(driver, vm,
+                                       QEMU_JOB_MODIFY,
+                                       QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -19447,7 +19472,7 @@ qemuDomainSetTime(virDomainPtr dom,
     ret = 0;
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndJobWithAgent(driver, vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -19473,7 +19498,7 @@ qemuDomainFSFreeze(virDomainPtr dom,
     if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -19482,7 +19507,7 @@ qemuDomainFSFreeze(virDomainPtr dom,
     ret = qemuDomainSnapshotFSFreeze(driver, vm, mountpoints, nmountpoints);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -19514,7 +19539,7 @@ qemuDomainFSThaw(virDomainPtr dom,
     if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -19523,7 +19548,7 @@ qemuDomainFSThaw(virDomainPtr dom,
     ret = qemuDomainSnapshotFSThaw(driver, vm, true);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -20547,7 +20572,7 @@ qemuDomainGetFSInfo(virDomainPtr dom,
     if (virDomainGetFSInfoEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -20567,7 +20592,7 @@ qemuDomainGetFSInfo(virDomainPtr dom,
     qemuDomainObjExitAgent(vm, agent);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -20604,7 +20629,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom,
         break;
 
     case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT:
-        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
             goto cleanup;
 
         if (!qemuDomainAgentAvailable(vm, true))
@@ -20615,7 +20640,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom,
         qemuDomainObjExitAgent(vm, agent);
 
     endjob:
-        qemuDomainObjEndJob(driver, vm);
+        qemuDomainObjEndAgentJob(vm);
 
         break;
 
@@ -20816,7 +20841,7 @@ qemuDomainSetUserPassword(virDomainPtr dom,
     if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
@@ -20836,7 +20861,7 @@ qemuDomainSetUserPassword(virDomainPtr dom,
     ret = 0;
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     virDomainObjEndAPI(&vm);
@@ -21077,7 +21102,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom,
     if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
         goto cleanup;
 
     if (!qemuDomainAgentAvailable(vm, true))
@@ -21096,7 +21121,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom,
     ret = 0;
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     VIR_FREE(info);
@@ -21136,7 +21161,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom,
     if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
         goto cleanup;
 
     if (!qemuDomainAgentAvailable(vm, true))
@@ -21182,7 +21207,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom,
     qemuDomainObjExitAgent(vm, agent);
 
  endjob:
-    qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndAgentJob(vm);
 
  cleanup:
     VIR_FREE(info);
-- 
2.16.4

Re: [libvirt] [v2 RESEND 3/3] qemu: Switch code to use new agent job APIs
Posted by Jiri Denemark 7 years ago
On Tue, Jun 19, 2018 at 08:38:02 +0200, Michal Privoznik wrote:
> There are two sets of functions here:
> 1) some functions talk on both the monitor and the agent monitor,
> 2) some functions talk only on the agent monitor.
> 
> For functions from set 1) we need to use
> qemuDomainObjBeginJobWithAgent() and for functions from set 2) we
> need to use qemuDomainObjBeginAgentJob() only.
> 
> Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
> ---
>  src/qemu/qemu_driver.c | 91 ++++++++++++++++++++++++++++++++------------------
>  1 file changed, 58 insertions(+), 33 deletions(-)
> 
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 3abbe41895..cffd4c928a 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -1954,6 +1954,7 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
>      bool useAgent = false, agentRequested, acpiRequested;
>      bool isReboot = false;
>      bool agentForced;
> +    bool agentJob = false;
>      int agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN;
>  
>      virCheckFlags(VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN |
> @@ -1980,9 +1981,14 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
>      if (virDomainShutdownFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
>          goto cleanup;
>  
> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
> +        (useAgent && qemuDomainObjBeginJobWithAgent(driver, vm,
> +                                                    QEMU_JOB_MODIFY,
> +                                                    QEMU_AGENT_JOB_MODIFY) < 0))

Looks a bit hard to parse, it would be easier to just call
qemuDomainObjBeginJobWithAgent:

    qemuDomainAgentJob agentJob = QEMU_AGENT_JOB_NONE;

    if (useAgent)
        agentJob = QEMU_AGENT_JOB_MODIFY;

    if (qemuDomainObjBeginJobWithAgent(driver, vm, QEMU_JOB_MODIFY,
                                       agentJob) < 0)
        goto cleanup;

Perhaps it would even make sense to document this usage if the caller
does not always need to talk to the agent.
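
For instance (wording is only a sketch, assuming QEMU_AGENT_JOB_NONE is
accepted as suggested above), the comment above
qemuDomainObjBeginJobWithAgent() could say something like:

    /*
     * Callers which need the agent only for certain flag combinations
     * may pass QEMU_AGENT_JOB_NONE for @agentJob; no agent job is
     * grabbed in that case and the call behaves like
     * qemuDomainObjBeginJob().  Such callers end the job with
     * qemuDomainObjEndJob() instead of qemuDomainObjEndJobWithAgent().
     */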

> @@ -2026,7 +2032,10 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
>      }
>  
>   endjob:
> -    qemuDomainObjEndJob(driver, vm);
> +    if (agentJob)
> +        qemuDomainObjEndJobWithAgent(driver, vm);
> +    else
> +        qemuDomainObjEndJob(driver, vm);

And this would still work even with the suggested changes.
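
That is, with agentJob now a qemuDomainAgentJob rather than a bool, and
assuming QEMU_AGENT_JOB_NONE is the zero value of that enum, the check
keeps working unchanged:

    if (agentJob)   /* false only for QEMU_AGENT_JOB_NONE */
        qemuDomainObjEndJobWithAgent(driver, vm);
    else
        qemuDomainObjEndJob(driver, vm);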

>  
>   cleanup:
>      virDomainObjEndAPI(&vm);
> @@ -2049,6 +2058,7 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
>      bool useAgent = false, agentRequested, acpiRequested;
>      bool isReboot = true;
>      bool agentForced;
> +    bool agentJob = false;
>      int agentFlag = QEMU_AGENT_SHUTDOWN_REBOOT;
>  
>      virCheckFlags(VIR_DOMAIN_REBOOT_ACPI_POWER_BTN |
> @@ -2075,9 +2085,14 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
>      if (virDomainRebootEnsureACL(dom->conn, vm->def, flags) < 0)
>          goto cleanup;
>  
> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
> +        (useAgent && qemuDomainObjBeginJobWithAgent(driver, vm,
> +                                                    QEMU_JOB_MODIFY,
> +                                                    QEMU_AGENT_JOB_MODIFY) < 0))

The same pattern could be used here.

>          goto cleanup;
>  
> +    agentJob = useAgent;
> +
>      agentForced = agentRequested && !acpiRequested;
>      if (!qemuDomainAgentAvailable(vm, agentForced)) {
>          if (agentForced)
> @@ -2115,7 +2130,10 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
>      }
>  
>   endjob:
> -    qemuDomainObjEndJob(driver, vm);
> +    if (agentJob)
> +        qemuDomainObjEndJobWithAgent(driver, vm);
> +    else
> +        qemuDomainObjEndJob(driver, vm);
>  
>   cleanup:
>      virDomainObjEndAPI(&vm);
> @@ -4949,6 +4967,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
>      virDomainDefPtr def;
>      virDomainDefPtr persistentDef;
>      bool hotpluggable = !!(flags & VIR_DOMAIN_VCPU_HOTPLUGGABLE);
> +    bool useAgent = !!(flags & VIR_DOMAIN_VCPU_GUEST);
>      int ret = -1;
>  
>      virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
> @@ -4963,13 +4982,14 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
>      if (virDomainSetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
>          goto cleanup;
>  
> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
> +        (useAgent && qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0))
>          goto cleanup;

And here.

>  
>      if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
>          goto endjob;
>  
> -    if (flags & VIR_DOMAIN_VCPU_GUEST)
> +    if (useAgent)
>          ret = qemuDomainSetVcpusAgent(vm, nvcpus);
>      else if (flags & VIR_DOMAIN_VCPU_MAXIMUM)
>          ret = qemuDomainSetVcpusMax(driver, def, persistentDef, nvcpus);
> @@ -4978,7 +4998,10 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
>                                           nvcpus, hotpluggable);
>  
>   endjob:
> -    qemuDomainObjEndJob(driver, vm);
> +    if (useAgent)
> +        qemuDomainObjEndAgentJob(vm);
> +    else
> +        qemuDomainObjEndJob(driver, vm);
>  
>   cleanup:
>      virDomainObjEndAPI(&vm);
> @@ -5429,7 +5452,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
>          goto cleanup;
>  
>      if (flags & VIR_DOMAIN_VCPU_GUEST) {
> -        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
> +        if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
>              goto cleanup;
>  
>          if (!virDomainObjIsActive(vm)) {
> @@ -5447,7 +5470,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
>          qemuDomainObjExitAgent(vm, agent);
>  
>   endjob:
> -        qemuDomainObjEndJob(driver, vm);
> +        qemuDomainObjEndAgentJob(vm);
>  
>          if (ncpuinfo < 0)
>              goto cleanup;

I'd expect changes to qemuDomainSnapshotCreateActiveExternal here since
it calls qemuDomainSnapshotFSFreeze and qemuDomainSnapshotFSThaw from an
async job without acquiring a nested job, which looks wrong. And this
patch should change it to acquire an agent job before talking to the
agent.
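
Something along these lines around the freeze call, I would imagine (a
sketch only; the local variable and the error label are illustrative):

    /* grab an agent job before freezing guest filesystems; the async
     * snapshot job by itself does not serialize access to the agent */
    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
        goto cleanup;
    frozen = qemuDomainSnapshotFSFreeze(driver, vm, NULL, 0);
    qemuDomainObjEndAgentJob(vm);
    if (frozen < 0)
        goto cleanup;

The thaw path would need the same treatment.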

> @@ -18954,7 +18977,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom,
>      if (virDomainPMSuspendForDurationEnsureACL(dom->conn, vm->def) < 0)
>          goto cleanup;
>  
> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> +    if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
>          goto cleanup;
>  
>      if (virDomainObjCheckActive(vm) < 0)
...

The rest looks OK.

Jirka

Re: [libvirt] [v2 RESEND 3/3] qemu: Switch code to use new agent job APIs
Posted by Michal Prívozník 7 years ago
On 06/19/2018 05:05 PM, Jiri Denemark wrote:
> On Tue, Jun 19, 2018 at 08:38:02 +0200, Michal Privoznik wrote:
>> There are two sets of functions here:
>> 1) some functions talk on both the monitor and the agent monitor,
>> 2) some functions talk only on the agent monitor.
>>
>> For functions from set 1) we need to use
>> qemuDomainObjBeginJobWithAgent() and for functions from set 2) we
>> need to use qemuDomainObjBeginAgentJob() only.
>>
>> Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
>> ---
>>  src/qemu/qemu_driver.c | 91 ++++++++++++++++++++++++++++++++------------------
>>  1 file changed, 58 insertions(+), 33 deletions(-)
>>
>> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
>> index 3abbe41895..cffd4c928a 100644
>> --- a/src/qemu/qemu_driver.c
>> +++ b/src/qemu/qemu_driver.c
>> @@ -1954,6 +1954,7 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
>>      bool useAgent = false, agentRequested, acpiRequested;
>>      bool isReboot = false;
>>      bool agentForced;
>> +    bool agentJob = false;
>>      int agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN;
>>  
>>      virCheckFlags(VIR_DOMAIN_SHUTDOWN_ACPI_POWER_BTN |
>> @@ -1980,9 +1981,14 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
>>      if (virDomainShutdownFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
>>          goto cleanup;
>>  
>> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
>> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
>> +        (useAgent && qemuDomainObjBeginJobWithAgent(driver, vm,
>> +                                                    QEMU_JOB_MODIFY,
>> +                                                    QEMU_AGENT_JOB_MODIFY) < 0))
> 
> Looks a bit hard to parse, it would be easier to just call
> qemuDomainObjBeginJobWithAgent:
> 
>     qemuDomainAgentJob agentJob = QEMU_AGENT_JOB_NONE;
> 
>     if (useAgent)
>         agentJob = QEMU_AGENT_JOB_MODIFY;
> 
>     if (qemuDomainObjBeginJobWithAgent(driver, vm, QEMU_JOB_MODIFY,
>                                        agentJob) < 0)
>         goto cleanup;
> 
> Perhaps it would even make sense to document this usage if the caller
> does not always need to talk to the agent.
> 
>> @@ -2026,7 +2032,10 @@ static int qemuDomainShutdownFlags(virDomainPtr dom, unsigned int flags)
>>      }
>>  
>>   endjob:
>> -    qemuDomainObjEndJob(driver, vm);
>> +    if (agentJob)
>> +        qemuDomainObjEndJobWithAgent(driver, vm);
>> +    else
>> +        qemuDomainObjEndJob(driver, vm);
> 
> And this would still work even with the suggested changes.

Okay.
> 
>>  
>>   cleanup:
>>      virDomainObjEndAPI(&vm);
>> @@ -2049,6 +2058,7 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
>>      bool useAgent = false, agentRequested, acpiRequested;
>>      bool isReboot = true;
>>      bool agentForced;
>> +    bool agentJob = false;
>>      int agentFlag = QEMU_AGENT_SHUTDOWN_REBOOT;
>>  
>>      virCheckFlags(VIR_DOMAIN_REBOOT_ACPI_POWER_BTN |
>> @@ -2075,9 +2085,14 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
>>      if (virDomainRebootEnsureACL(dom->conn, vm->def, flags) < 0)
>>          goto cleanup;
>>  
>> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
>> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
>> +        (useAgent && qemuDomainObjBeginJobWithAgent(driver, vm,
>> +                                                    QEMU_JOB_MODIFY,
>> +                                                    QEMU_AGENT_JOB_MODIFY) < 0))
> 
> The same pattern could be used here.
> 
>>          goto cleanup;
>>  
>> +    agentJob = useAgent;
>> +
>>      agentForced = agentRequested && !acpiRequested;
>>      if (!qemuDomainAgentAvailable(vm, agentForced)) {
>>          if (agentForced)
>> @@ -2115,7 +2130,10 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags)
>>      }
>>  
>>   endjob:
>> -    qemuDomainObjEndJob(driver, vm);
>> +    if (agentJob)
>> +        qemuDomainObjEndJobWithAgent(driver, vm);
>> +    else
>> +        qemuDomainObjEndJob(driver, vm);
>>  
>>   cleanup:
>>      virDomainObjEndAPI(&vm);
>> @@ -4949,6 +4967,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
>>      virDomainDefPtr def;
>>      virDomainDefPtr persistentDef;
>>      bool hotpluggable = !!(flags & VIR_DOMAIN_VCPU_HOTPLUGGABLE);
>> +    bool useAgent = !!(flags & VIR_DOMAIN_VCPU_GUEST);
>>      int ret = -1;
>>  
>>      virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
>> @@ -4963,13 +4982,14 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
>>      if (virDomainSetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
>>          goto cleanup;
>>  
>> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
>> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
>> +        (useAgent && qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0))
>>          goto cleanup;
> 
> And here.

Actually no. This one is different from the previous two places. This one
grabs either a domain job OR an agent job, but not both at the same time
(which is what the previous places do). I could do:

    qemuDomainJob job = QEMU_JOB_NONE;
    qemuDomainAgentJob agentJob = QEMU_AGENT_JOB_NONE;

    if (flags & VIR_DOMAIN_VCPU_GUEST)
        agentJob = QEMU_AGENT_JOB_MODIFY;
    else
        job = QEMU_JOB_MODIFY;

and then

    if (qemuDomainObjBeginJobWithAgent(driver, vm, job, agentJob) < 0)
        goto cleanup;

It's rather a stretch of the API, but whatever.

> 
>>  
>>      if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
>>          goto endjob;
>>  
>> -    if (flags & VIR_DOMAIN_VCPU_GUEST)
>> +    if (useAgent)
>>          ret = qemuDomainSetVcpusAgent(vm, nvcpus);
>>      else if (flags & VIR_DOMAIN_VCPU_MAXIMUM)
>>          ret = qemuDomainSetVcpusMax(driver, def, persistentDef, nvcpus);
>> @@ -4978,7 +4998,10 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
>>                                           nvcpus, hotpluggable);
>>  
>>   endjob:
>> -    qemuDomainObjEndJob(driver, vm);
>> +    if (useAgent)
>> +        qemuDomainObjEndAgentJob(vm);
>> +    else
>> +        qemuDomainObjEndJob(driver, vm);
>>  
>>   cleanup:
>>      virDomainObjEndAPI(&vm);
>> @@ -5429,7 +5452,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
>>          goto cleanup;
>>  
>>      if (flags & VIR_DOMAIN_VCPU_GUEST) {
>> -        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
>> +        if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
>>              goto cleanup;
>>  
>>          if (!virDomainObjIsActive(vm)) {
>> @@ -5447,7 +5470,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
>>          qemuDomainObjExitAgent(vm, agent);
>>  
>>   endjob:
>> -        qemuDomainObjEndJob(driver, vm);
>> +        qemuDomainObjEndAgentJob(vm);
>>  
>>          if (ncpuinfo < 0)
>>              goto cleanup;
> 
> I'd expect changes to qemuDomainSnapshotCreateActiveExternal here since
> it calls qemuDomainSnapshotFSFreeze and qemuDomainSnapshotFSThaw from an
> async job without acquiring a nested job, which looks wrong. And this
> patch should change it to acquire an agent job before talking to the
> agent.

Okay, I'll fix it and post v3.

Michal

Re: [libvirt] [v2 RESEND 3/3] qemu: Switch code to use new agent job APIs
Posted by Jiri Denemark 7 years ago
On Tue, Jun 19, 2018 at 19:03:24 +0200, Michal Prívozník wrote:
> On 06/19/2018 05:05 PM, Jiri Denemark wrote:
> > On Tue, Jun 19, 2018 at 08:38:02 +0200, Michal Privoznik wrote:
> >> There are two sets of functions here:
> >> 1) some functions talk on both the monitor and the agent monitor,
> >> 2) some functions talk only on the agent monitor.
> >>
> >> For functions from set 1) we need to use
> >> qemuDomainObjBeginJobWithAgent() and for functions from set 2) we
> >> need to use qemuDomainObjBeginAgentJob() only.
> >>
> >> Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
> >> ---
> >>  src/qemu/qemu_driver.c | 91 ++++++++++++++++++++++++++++++++------------------
> >>  1 file changed, 58 insertions(+), 33 deletions(-)
> >>
> >> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> >> index 3abbe41895..cffd4c928a 100644
> >> --- a/src/qemu/qemu_driver.c
> >> +++ b/src/qemu/qemu_driver.c
...
> >> @@ -4949,6 +4967,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
> >>      virDomainDefPtr def;
> >>      virDomainDefPtr persistentDef;
> >>      bool hotpluggable = !!(flags & VIR_DOMAIN_VCPU_HOTPLUGGABLE);
> >> +    bool useAgent = !!(flags & VIR_DOMAIN_VCPU_GUEST);
> >>      int ret = -1;
> >>  
> >>      virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
> >> @@ -4963,13 +4982,14 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
> >>      if (virDomainSetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
> >>          goto cleanup;
> >>  
> >> -    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> >> +    if ((!useAgent && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) ||
> >> +        (useAgent && qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0))
> >>          goto cleanup;
> > 
> > And here.
> 
> Actually no. This one is different from the previous two places. This one
> grabs either a domain job OR an agent job, but not both at the same time
> (which is what the previous places do).

Ah right, I misread qemuDomainObjBeginAgentJob as
qemuDomainObjBeginJobWithAgent. In this case, I'd just modify the code a
bit to make it clearer the two cases are mutually exclusive, e.g.:

    int rc;

    if (useAgent)
        rc = qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY);
    else
        rc = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY);

    if (rc < 0)
        goto cleanup;

Jirka
