Whenever a different kernel is booted, some capabilities related to KVM
(such as CPUID bits) may change. We need to refresh the cache to see the
changes.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
The capabilities may also change if a parameter passed to a kvm module
changes (kvm_intel.nested is a good example) so this is not a complete
solution, but we're hopefully getting closer to it :-)
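
A purely illustrative idea, not part of this patch: the note above mentions
that KVM module parameters (kvm_intel.nested being the usual example) also
influence the reported capabilities. One conceivable way to catch such
changes would be to fingerprint the relevant sysfs parameters as well. The
helper below is a hypothetical sketch in plain C -- the function name and
the choice of parameters are invented here, and libvirt would of course use
its own string/file helpers instead:

#include <stdio.h>
#include <string.h>

/* Read a single module parameter from sysfs into 'buf'.
 * Returns 0 on success, -1 if the module or parameter is absent. */
static int
readKvmModuleParam(const char *module, const char *param,
                   char *buf, size_t buflen)
{
    char path[256];
    FILE *fp;

    snprintf(path, sizeof(path), "/sys/module/%s/parameters/%s",
             module, param);

    if (!(fp = fopen(path, "r")))
        return -1;

    if (!fgets(buf, (int)buflen, fp)) {
        fclose(fp);
        return -1;
    }
    fclose(fp);

    buf[strcspn(buf, "\n")] = '\0';   /* drop the trailing newline */
    return 0;
}

int main(void)
{
    char nested[64];

    if (readKvmModuleParam("kvm_intel", "nested", nested, sizeof(nested)) == 0)
        printf("kvm_intel.nested = %s\n", nested);
    else
        printf("kvm_intel.nested not available on this host\n");
    return 0;
}

The resulting value ("Y"/"N" or "1"/"0" depending on the kernel) could then
be folded into the same host fingerprint this patch compares in
virQEMUCapsIsValid(), which would close the gap mentioned above.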
src/qemu/qemu_capabilities.c | 57 +++++++++++++++++++++++++++++++++++++-------
src/qemu/qemu_capspriv.h | 1 +
tests/qemucapsprobe.c | 2 +-
3 files changed, 50 insertions(+), 10 deletions(-)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index ab0ea8ec0d..2c573827f1 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -52,6 +52,7 @@
#include <unistd.h>
#include <sys/wait.h>
#include <stdarg.h>
+#include <sys/utsname.h>
#define VIR_FROM_THIS VIR_FROM_QEMU
@@ -510,6 +511,7 @@ struct _virQEMUCaps {
unsigned int libvirtVersion;
unsigned int microcodeVersion;
char *package;
+ char *kernelVersion;
virArch arch;
@@ -2303,6 +2305,9 @@ virQEMUCapsPtr virQEMUCapsNewCopy(virQEMUCapsPtr qemuCaps)
if (VIR_STRDUP(ret->package, qemuCaps->package) < 0)
goto error;
+ if (VIR_STRDUP(ret->kernelVersion, qemuCaps->kernelVersion) < 0)
+ goto error;
+
ret->arch = qemuCaps->arch;
if (qemuCaps->kvmCPUModels) {
@@ -2363,6 +2368,7 @@ void virQEMUCapsDispose(void *obj)
virBitmapFree(qemuCaps->flags);
VIR_FREE(qemuCaps->package);
+ VIR_FREE(qemuCaps->kernelVersion);
VIR_FREE(qemuCaps->binary);
VIR_FREE(qemuCaps->gicCapabilities);
@@ -3834,6 +3840,7 @@ struct _virQEMUCapsCachePriv {
gid_t runGid;
virArch hostArch;
unsigned int microcodeVersion;
+ char *kernelVersion;
};
typedef struct _virQEMUCapsCachePriv virQEMUCapsCachePriv;
typedef virQEMUCapsCachePriv *virQEMUCapsCachePrivPtr;
@@ -3845,6 +3852,7 @@ virQEMUCapsCachePrivFree(void *privData)
virQEMUCapsCachePrivPtr priv = privData;
VIR_FREE(priv->libDir);
+ VIR_FREE(priv->kernelVersion);
VIR_FREE(priv);
}
@@ -3970,6 +3978,12 @@ virQEMUCapsLoadCache(virArch hostArch,
goto cleanup;
}
+ if (virXPathBoolean("boolean(./kernelVersion)", ctxt) > 0) {
+ qemuCaps->kernelVersion = virXPathString("string(./kernelVersion)", ctxt);
+ if (!qemuCaps->kernelVersion)
+ goto cleanup;
+ }
+
if (!(str = virXPathString("string(./arch)", ctxt))) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("missing arch in QEMU capabilities cache"));
@@ -4248,6 +4262,10 @@ virQEMUCapsFormatCache(virQEMUCapsPtr qemuCaps)
virBufferAsprintf(&buf, "<package>%s</package>\n",
qemuCaps->package);
+ if (qemuCaps->kernelVersion)
+ virBufferAsprintf(&buf, "<kernelVersion>%s</kernelVersion>\n",
+ qemuCaps->kernelVersion);
+
virBufferAsprintf(&buf, "<arch>%s</arch>\n",
virArchToString(qemuCaps->arch));
@@ -4385,14 +4403,24 @@ virQEMUCapsIsValid(void *data,
return false;
}
- if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_KVM) &&
- priv->microcodeVersion != qemuCaps->microcodeVersion) {
- VIR_DEBUG("Outdated capabilities for '%s': microcode version changed "
- "(%u vs %u)",
- qemuCaps->binary,
- priv->microcodeVersion,
- qemuCaps->microcodeVersion);
- return false;
+ if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_KVM)) {
+ if (priv->microcodeVersion != qemuCaps->microcodeVersion) {
+ VIR_DEBUG("Outdated capabilities for '%s': microcode version "
+ "changed (%u vs %u)",
+ qemuCaps->binary,
+ priv->microcodeVersion,
+ qemuCaps->microcodeVersion);
+ return false;
+ }
+
+ if (STRNEQ_NULLABLE(priv->kernelVersion, qemuCaps->kernelVersion)) {
+ VIR_DEBUG("Outdated capabilities for '%s': kernel version changed "
+ "('%s' vs '%s')",
+ qemuCaps->binary,
+ priv->kernelVersion,
+ qemuCaps->kernelVersion);
+ return false;
+ }
}
return true;
@@ -5228,6 +5256,7 @@ virQEMUCapsNewForBinaryInternal(virArch hostArch,
uid_t runUid,
gid_t runGid,
unsigned int microcodeVersion,
+ const char *kernelVersion,
bool qmpOnly)
{
virQEMUCapsPtr qemuCaps;
@@ -5284,9 +5313,13 @@ virQEMUCapsNewForBinaryInternal(virArch hostArch,
virQEMUCapsInitHostCPUModel(qemuCaps, hostArch, VIR_DOMAIN_VIRT_KVM);
virQEMUCapsInitHostCPUModel(qemuCaps, hostArch, VIR_DOMAIN_VIRT_QEMU);
- if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_KVM))
+ if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_KVM)) {
qemuCaps->microcodeVersion = microcodeVersion;
+ if (VIR_STRDUP(qemuCaps->kernelVersion, kernelVersion) < 0)
+ goto error;
+ }
+
cleanup:
VIR_FREE(qmperr);
return qemuCaps;
@@ -5309,6 +5342,7 @@ virQEMUCapsNewData(const char *binary,
priv->runUid,
priv->runGid,
priv->microcodeVersion,
+ priv->kernelVersion,
false);
}
@@ -5397,6 +5431,7 @@ virQEMUCapsCacheNew(const char *libDir,
char *capsCacheDir = NULL;
virFileCachePtr cache = NULL;
virQEMUCapsCachePrivPtr priv = NULL;
+ struct utsname uts = { 0 };
if (virAsprintf(&capsCacheDir, "%s/capabilities", cacheDir) < 0)
goto error;
@@ -5417,6 +5452,10 @@ virQEMUCapsCacheNew(const char *libDir,
priv->runGid = runGid;
priv->microcodeVersion = microcodeVersion;
+ if (uname(&uts) == 0 &&
+ virAsprintf(&priv->kernelVersion, "%s %s", uts.release, uts.version) < 0)
+ goto error;
+
cleanup:
VIR_FREE(capsCacheDir);
return cache;
diff --git a/src/qemu/qemu_capspriv.h b/src/qemu/qemu_capspriv.h
index 98d163b920..222f3368e3 100644
--- a/src/qemu/qemu_capspriv.h
+++ b/src/qemu/qemu_capspriv.h
@@ -37,6 +37,7 @@ virQEMUCapsNewForBinaryInternal(virArch hostArch,
uid_t runUid,
gid_t runGid,
unsigned int microcodeVersion,
+ const char *kernelVersion,
bool qmpOnly);
int virQEMUCapsLoadCache(virArch hostArch,
diff --git a/tests/qemucapsprobe.c b/tests/qemucapsprobe.c
index a5f5a38b16..7d60246949 100644
--- a/tests/qemucapsprobe.c
+++ b/tests/qemucapsprobe.c
@@ -72,7 +72,7 @@ main(int argc, char **argv)
return EXIT_FAILURE;
if (!(caps = virQEMUCapsNewForBinaryInternal(VIR_ARCH_NONE, argv[1], "/tmp",
- -1, -1, 0, true)))
+ -1, -1, 0, NULL, true)))
return EXIT_FAILURE;
virObjectUnref(caps);
--
2.16.0
On Mon, Jan 22, 2018 at 11:46:14AM +0100, Jiri Denemark wrote:
> Whenever a different kernel is booted, some capabilities related to KVM
> (such as CPUID bits) may change. We need to refresh the cache to see the
> changes.
>
> Notes:
>     The capabilities may also change if a parameter passed to a kvm module
>     changes (kvm_intel.nested is a good example) so this is not a complete
>     solution, but we're hopefully getting closer to it :-)

You mean getting closer to a situation where we are effectively storing the
cache on tmpfs, because we invalidate it on every reboot!

I think sometime soon we're going to need to consider whether our cache
invalidation approach is fundamentally broken. We have a huge amount of
stuff we query from QEMU, but only a tiny amount is dependent on the host
kernel / microcode / kvm module options. Should we go back to invalidating
only when the libvirt/qemu binary changes, but then do partial invalidation
of specific data items for kernel/microcode changes? It might require QEMU's
help to give us a "promise" of which QMP commands return data that may be
impacted by KVM / the kernel.

Reviewed-by: Daniel P. Berrange <berrange@redhat.com>

Regards,
Daniel
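
As an aside for readers following the thread: the invalidation key being
discussed is small enough to show as a standalone sketch. The helper names
below are invented for illustration only; in the patch itself the same logic
is expressed with virAsprintf(), VIR_STRDUP() and the STRNEQ_NULLABLE()
macro inside virQEMUCapsCacheNew() and virQEMUCapsIsValid().

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/utsname.h>

/* Build the host kernel identifier the way the patch does:
 * "<release> <version>", e.g. "4.14.14-300.fc27.x86_64 #1 SMP ...".
 * The caller frees the result. */
static char *
buildKernelVersion(void)
{
    struct utsname uts;
    char *ver;

    if (uname(&uts) < 0)
        return NULL;

    if (asprintf(&ver, "%s %s", uts.release, uts.version) < 0)
        return NULL;

    return ver;
}

/* The cached capabilities are stale whenever the stored string differs
 * from the one computed at daemon startup; NULL on both sides counts as
 * equal, mirroring STRNEQ_NULLABLE(). */
static int
kernelVersionChanged(const char *cached, const char *current)
{
    if (!cached && !current)
        return 0;
    if (!cached || !current)
        return 1;
    return strcmp(cached, current) != 0;
}

int main(void)
{
    char *now = buildKernelVersion();
    /* stand-in for the <kernelVersion> element loaded from the XML cache */
    const char *cached = getenv("CACHED_KERNEL_VERSION");

    printf("host kernel fingerprint: %s\n", now ? now : "(unknown)");
    if (kernelVersionChanged(cached, now))
        printf("cached capabilities for a KVM-enabled binary would be re-probed\n");

    free(now);
    return 0;
}

Booting a different kernel changes uts.release/uts.version, the comparison
fails, and only the capabilities of KVM-enabled binaries are re-probed; the
pre-existing microcodeVersion check handles microcode updates the same way.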
On Mon, Jan 22, 2018 at 10:57:57 +0000, Daniel P. Berrange wrote:
> You mean getting closer to a situation where we are effectively storing the
> cache on tmpfs, because we invalidate it on every reboot!

Well, that's a possible result, yes. Although it's both incomplete and
invalidating the cache too often at the same time. It's possible we won't be
able to come up with anything more clever anyway.

> I think sometime soon we're going to need to consider whether our cache
> invalidation approach is fundamentally broken. We have a huge amount of
> stuff we query from QEMU, but only a tiny amount is dependent on the host
> kernel / microcode / kvm module options. Should we go back to invalidating
> only when the libvirt/qemu binary changes, but then do partial invalidation
> of specific data items for kernel/microcode changes?

On the other hand, while we have QEMU running, probing for all capabilities
vs just the limited set which depends on the host shouldn't make a big
difference. I haven't actually measured it, though. However, we only
invalidate the cache more often for KVM, which makes it pretty limited
already since we only invalidate the capabilities for a single binary.

Jirka
On Mon, Jan 22, 2018 at 01:31:21PM +0100, Jiri Denemark wrote:
> On the other hand, while we have QEMU running, probing for all capabilities
> vs just the limited set which depends on the host shouldn't make a big
> difference. I haven't actually measured it, though. However, we only
> invalidate the cache more often for KVM, which makes it pretty limited
> already since we only invalidate the capabilities for a single binary.

Oh true, I didn't notice you'd only done invalidation for the KVM code path.
That should avoid the major pain that GNOME Boxes saw, where we spent ages
probing 20 QEMU binaries on every startup.

Regards,
Daniel
On 22.01.2018 15:36, Daniel P. Berrange wrote:
> Oh true, I didn't notice you'd only done invalidation for the KVM code path.
> That should avoid the major pain that GNOME Boxes saw, where we spent ages
> probing 20 QEMU binaries on every startup.

Hi. I hope this topic is not too old...

If dealing with a lot of binaries is what the qemu caps cache is for, we
could disable the cache for KVM only and solve the issues with
kvm_intel.nested.

Nikolay
On Tue, Jan 30, 2018 at 13:26:49 +0300, Nikolay Shirokovskiy wrote:
> If dealing with a lot of binaries is what the qemu caps cache is for, we
> could disable the cache for KVM only and solve the issues with
> kvm_intel.nested.

Not really. If you are going to start a bunch of KVM VMs at the same time,
you'd still get a considerable penalty from re-detecting the capabilities
every single time. The conversation with qemu used to do the probing
exchanges around 300k of JSON, which libvirt then needs to process.

I don't think we can stop caching capabilities for qemu given the volume of
stuff we need to query.