From: Jiri Denemark
To: libvir-list@redhat.com
Date: Wed, 11 Apr 2018 16:41:57 +0200
Subject: [libvirt] [PATCH v2 67/73] qemu: Store API flags for async jobs in qemuDomainJobObj

When an async job is running, we sometimes need to know how it was
started in order to distinguish between several types of the job, e.g.,
post-copy vs. normal migration. So far we have been adding a dedicated
bool to qemuDomainJobObj for each such case, which does not scale well,
and storing such bools in the status XML would be painful, so we never
did it. A better approach is to store the flags passed to the API which
started the async job; they can easily be stored in the status XML.
Signed-off-by: Jiri Denemark
Reviewed-by: Ján Tomko
---
 src/qemu/qemu_domain.c    | 10 ++++++++--
 src/qemu/qemu_domain.h    |  4 +++-
 src/qemu/qemu_driver.c    | 31 +++++++++++++++++++------------
 src/qemu/qemu_migration.c | 20 +++++++++++++-------
 src/qemu/qemu_process.c   |  5 +++--
 src/qemu/qemu_process.h   |  3 ++-
 6 files changed, 48 insertions(+), 25 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index e3f58e45cb..bf04053607 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -339,6 +339,7 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
     VIR_FREE(job->current);
     qemuMigrationParamsFree(job->migParams);
     job->migParams = NULL;
+    job->apiFlags = 0;
 }
 
 void
@@ -354,6 +355,7 @@ qemuDomainObjRestoreJob(virDomainObjPtr obj,
     job->asyncOwner = priv->job.asyncOwner;
     job->phase = priv->job.phase;
     VIR_STEAL_PTR(job->migParams, priv->job.migParams);
+    job->apiFlags = priv->job.apiFlags;
 
     qemuDomainObjResetJob(priv);
     qemuDomainObjResetAsyncJob(priv);
@@ -5805,7 +5807,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
         asyncDuration = now - priv->job.asyncStarted;
 
     VIR_WARN("Cannot start job (%s, %s) for domain %s; "
-             "current job is (%s, %s) owned by (%llu %s, %llu %s) "
+             "current job is (%s, %s) "
+             "owned by (%llu %s, %llu %s (flags=0x%lx)) "
              "for (%llus, %llus)",
              qemuDomainJobTypeToString(job),
              qemuDomainAsyncJobTypeToString(asyncJob),
@@ -5814,6 +5817,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              priv->job.owner, NULLSTR(priv->job.ownerAPI),
              priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
+             priv->job.apiFlags,
              duration / 1000, asyncDuration / 1000);
 
     if (nested || qemuDomainNestedJobAllowed(priv, job))
@@ -5877,7 +5881,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
-                               virDomainJobOperation operation)
+                               virDomainJobOperation operation,
+                               unsigned long apiFlags)
 {
     qemuDomainObjPrivatePtr priv;
 
@@ -5887,6 +5892,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
 
     priv = obj->privateData;
     priv->job.current->operation = operation;
+    priv->job.apiFlags = apiFlags;
     return 0;
 }
 
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 2bb3e0a788..2146ff00a9 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -182,6 +182,7 @@ struct _qemuDomainJobObj {
     bool dumpCompleted;                 /* dump completed */
 
     qemuMigrationParamsPtr migParams;
+    unsigned long apiFlags; /* flags passed to the API which started the async job */
 };
 
 typedef void (*qemuDomainCleanupCallback)(virQEMUDriverPtr driver,
@@ -493,7 +494,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
-                               virDomainJobOperation operation)
+                               virDomainJobOperation operation,
+                               unsigned long apiFlags)
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 8917beae35..198020a0e4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -267,7 +267,7 @@ qemuAutostartDomain(virDomainObjPtr vm,
     if (vm->autostart &&
         !virDomainObjIsActive(vm)) {
         if (qemuProcessBeginJob(driver, vm,
-                                VIR_DOMAIN_JOB_OPERATION_START) < 0) {
+                                VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Failed to start job on VM '%s': %s"),
                            vm->def->name, virGetLastErrorMessage());
@@ -1779,7 +1779,8 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virObjectRef(vm);
     def = NULL;
 
-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0) {
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START,
+                            flags) < 0) {
         qemuDomainRemoveInactiveJob(driver, vm);
         goto cleanup;
     }
@@ -3345,7 +3346,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
         goto cleanup;
 
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
-                                   VIR_DOMAIN_JOB_OPERATION_SAVE) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -3953,7 +3954,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
 
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP,
+                                   flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -4178,7 +4180,8 @@ processWatchdogEvent(virQEMUDriverPtr driver,
     case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
         if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                       VIR_DOMAIN_JOB_OPERATION_DUMP) < 0) {
+                                       VIR_DOMAIN_JOB_OPERATION_DUMP,
+                                       flags) < 0) {
             goto cleanup;
         }
 
@@ -4264,9 +4267,10 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
     virObjectEventPtr event = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     bool removeInactive = false;
+    unsigned long flags = VIR_DUMP_MEMORY_ONLY;
 
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -4297,7 +4301,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
 
     switch (action) {
     case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
-        if (doCoreDumpToAutoDumpPath(driver, vm, VIR_DUMP_MEMORY_ONLY) < 0)
+        if (doCoreDumpToAutoDumpPath(driver, vm, flags) < 0)
             goto endjob;
         ATTRIBUTE_FALLTHROUGH;
 
@@ -4314,7 +4318,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
         break;
 
     case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
-        if (doCoreDumpToAutoDumpPath(driver, vm, VIR_DUMP_MEMORY_ONLY) < 0)
+        if (doCoreDumpToAutoDumpPath(driver, vm, flags) < 0)
             goto endjob;
         ATTRIBUTE_FALLTHROUGH;
 
@@ -6769,7 +6773,8 @@ qemuDomainRestoreFlags(virConnectPtr conn,
         priv->hookRun = true;
     }
 
-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE,
+                            flags) < 0)
         goto cleanup;
 
     ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, data, path,
@@ -7357,7 +7362,8 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
     if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START,
+                            flags) < 0)
         goto cleanup;
 
     if (virDomainObjIsActive(vm)) {
@@ -15208,7 +15214,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
      * 'savevm' blocks the monitor. External snapshot will then modify the
      * job mask appropriately. */
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
-                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
         goto cleanup;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
@@ -15807,7 +15813,8 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     }
 
     if (qemuProcessBeginJob(driver, vm,
-                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT) < 0)
+                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT,
+                            flags) < 0)
         goto cleanup;
 
     if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 42194c0ea8..431875c762 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -80,7 +80,8 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
 static int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      qemuDomainAsyncJob job)
+                      qemuDomainAsyncJob job,
+                      unsigned long apiFlags)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
 
 static void
@@ -1985,7 +1986,8 @@ qemuMigrationSrcBegin(virConnectPtr conn,
     qemuDomainAsyncJob asyncJob;
 
     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                  flags) < 0)
             goto cleanup;
         asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
     } else {
@@ -2320,7 +2322,8 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                                       !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
         goto cleanup;
 
-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+                              flags) < 0)
         goto cleanup;
     qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);
 
@@ -4440,7 +4443,8 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                              flags) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
@@ -4552,7 +4556,8 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
 
     /* If we didn't start the job in the begin phase, start it now. */
     if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                  flags) < 0)
             goto cleanup;
     } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
         goto cleanup;
@@ -5291,7 +5296,8 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver,
 static int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      qemuDomainAsyncJob job)
+                      qemuDomainAsyncJob job,
+                      unsigned long apiFlags)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virDomainJobOperation op;
@@ -5307,7 +5313,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver,
                JOB_MASK(QEMU_JOB_MIGRATION_OP);
     }
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
         return -1;
 
     priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 6aa4f4ed71..b4bb7eb8a5 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -4189,10 +4189,11 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
 int
 qemuProcessBeginJob(virQEMUDriverPtr driver,
                     virDomainObjPtr vm,
-                    virDomainJobOperation operation)
+                    virDomainJobOperation operation,
+                    unsigned long apiFlags)
 {
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
-                                   operation) < 0)
+                                   operation, apiFlags) < 0)
         return -1;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 2741115673..9dd5c97642 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -68,7 +68,8 @@ void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc);
 
 int qemuProcessBeginJob(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
-                        virDomainJobOperation operation);
+                        virDomainJobOperation operation,
+                        unsigned long apiFlags);
 void qemuProcessEndJob(virQEMUDriverPtr driver,
                        virDomainObjPtr vm);
 
-- 
2.17.0
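[Illustrative note, not part of the patch] A minimal standalone sketch of the
pattern the patch introduces: the async job object remembers the raw API flags
that started it, so later code can test individual flag bits instead of adding
a dedicated bool per feature, and the single integer is trivial to write into
and read back from a status file. All names below (exJobObj, EX_MIGRATE_*,
exBeginAsyncJob, exJobIsPostcopy) are made up for illustration and are not
libvirt identifiers; only the general idea matches the patch.

/*
 * Sketch: store the caller's API flags in the job object instead of
 * per-feature bools, and round-trip them through a status line.
 */
#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for migration flag bits; the values are illustrative only. */
enum {
    EX_MIGRATE_LIVE     = 1 << 0,
    EX_MIGRATE_POSTCOPY = 1 << 1,
};

/* Stand-in for qemuDomainJobObj: the async job remembers its API flags. */
struct exJobObj {
    int asyncJob;           /* which async job is running */
    unsigned long apiFlags; /* flags passed to the API that started it */
};

/* Analogous to qemuDomainObjBeginAsyncJob() gaining an apiFlags argument. */
static void
exBeginAsyncJob(struct exJobObj *job, int asyncJob, unsigned long apiFlags)
{
    job->asyncJob = asyncJob;
    job->apiFlags = apiFlags;
}

/* Later code asks about the job by testing a flag bit, not a dedicated bool. */
static bool
exJobIsPostcopy(const struct exJobObj *job)
{
    return !!(job->apiFlags & EX_MIGRATE_POSTCOPY);
}

int
main(void)
{
    struct exJobObj job = { 0 };
    char statusLine[64];
    unsigned long restored = 0;

    exBeginAsyncJob(&job, 1 /* "migration out" */,
                    EX_MIGRATE_LIVE | EX_MIGRATE_POSTCOPY);

    /* A single integer is easy to persist in status XML and read back. */
    snprintf(statusLine, sizeof(statusLine),
             "<job apiFlags='0x%lx'/>", job.apiFlags);
    sscanf(statusLine, "<job apiFlags='0x%lx'/>", &restored);

    printf("%s -> post-copy: %s\n", statusLine,
           exJobIsPostcopy(&job) ? "yes" : "no");
    printf("restored flags: 0x%lx\n", restored);
    return 0;
}

Compiled with any C99 compiler, this prints the serialized flags and shows the
post-copy bit being recovered without any extra bool in the job object.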