Recently, bhyve gained support for specifying the guest CPU topology.
It looks like this:

bhyve -c cpus=N,sockets=S,cores=C,threads=T ...

The old behaviour of bhyve -c N, where N is the number of vCPUs, is
still supported.

So if the domain XML specifies a CPU topology, use the new syntax;
otherwise, keep the old behaviour.
Signed-off-by: Roman Bogorodskiy <bogorodskiy@gmail.com>
---
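Note (illustration only, not part of the patch; the values mirror the new
test data added below): a guest with <vcpu>2</vcpu> and
<topology sockets='1' cores='2' threads='1'/> now gets

    bhyve -c cpus=2,sockets=1,cores=2,threads=1 -m 214 ...

while a guest without a <cpu> topology element keeps the old form:

    bhyve -c 2 -m 214 ...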
src/bhyve/bhyve_capabilities.c | 7 +++--
src/bhyve/bhyve_capabilities.h | 1 +
src/bhyve/bhyve_command.c | 17 +++++++++++-
.../bhyvexml2argv-cputopology.args | 9 +++++++
.../bhyvexml2argv-cputopology.ldargs | 3 +++
.../bhyvexml2argv-cputopology.xml | 26 +++++++++++++++++++
tests/bhyvexml2argvtest.c | 7 ++++-
7 files changed, 66 insertions(+), 4 deletions(-)
create mode 100644 tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.args
create mode 100644 tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.ldargs
create mode 100644 tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.xml
diff --git a/src/bhyve/bhyve_capabilities.c b/src/bhyve/bhyve_capabilities.c
index e13085b1d5..a3229cea75 100644
--- a/src/bhyve/bhyve_capabilities.c
+++ b/src/bhyve/bhyve_capabilities.c
@@ -227,7 +227,7 @@ bhyveProbeCapsDeviceHelper(unsigned int *caps,
 }
 
 static int
-bhyveProbeCapsRTC_UTC(unsigned int *caps, char *binary)
+bhyveProbeCapsFromHelp(unsigned int *caps, char *binary)
 {
     char *help;
     virCommandPtr cmd = NULL;
@@ -244,6 +244,9 @@ bhyveProbeCapsRTC_UTC(unsigned int *caps, char *binary)
     if (strstr(help, "-u:") != NULL)
         *caps |= BHYVE_CAP_RTC_UTC;
 
+    if (strstr(help, "sockets=n][,cores=n][,threads=n") != NULL)
+        *caps |= BHYVE_CAP_CPUTOPOLOGY;
+
  out:
     VIR_FREE(help);
     virCommandFree(cmd);
@@ -314,7 +317,7 @@ virBhyveProbeCaps(unsigned int *caps)
     if (binary == NULL)
         goto out;
 
-    if ((ret = bhyveProbeCapsRTC_UTC(caps, binary)))
+    if ((ret = bhyveProbeCapsFromHelp(caps, binary)))
         goto out;
 
     if ((ret = bhyveProbeCapsAHCI32Slot(caps, binary)))
diff --git a/src/bhyve/bhyve_capabilities.h b/src/bhyve/bhyve_capabilities.h
index 0e310e6eda..873bc9c12d 100644
--- a/src/bhyve/bhyve_capabilities.h
+++ b/src/bhyve/bhyve_capabilities.h
@@ -49,6 +49,7 @@ typedef enum {
     BHYVE_CAP_LPC_BOOTROM = 1 << 3,
     BHYVE_CAP_FBUF = 1 << 4,
     BHYVE_CAP_XHCI = 1 << 5,
+    BHYVE_CAP_CPUTOPOLOGY = 1 << 6,
 } virBhyveCapsFlags;
 
 int virBhyveProbeGrubCaps(virBhyveGrubCapsFlags *caps);
diff --git a/src/bhyve/bhyve_command.c b/src/bhyve/bhyve_command.c
index e3f7ded7db..b319518520 100644
--- a/src/bhyve/bhyve_command.c
+++ b/src/bhyve/bhyve_command.c
@@ -467,7 +467,22 @@ virBhyveProcessBuildBhyveCmd(virConnectPtr conn,
 
     /* CPUs */
     virCommandAddArg(cmd, "-c");
-    virCommandAddArgFormat(cmd, "%d", virDomainDefGetVcpus(def));
+    if (def->cpu && def->cpu->sockets) {
+        if ((bhyveDriverGetCaps(conn) & BHYVE_CAP_CPUTOPOLOGY) != 0) {
+            virCommandAddArgFormat(cmd, "cpus=%d,sockets=%d,cores=%d,threads=%d",
+                                   virDomainDefGetVcpus(def),
+                                   def->cpu->sockets,
+                                   def->cpu->cores,
+                                   def->cpu->threads);
+        } else {
+            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                           _("Installed bhyve binary does not support "
+                             "defining CPU topology"));
+            goto error;
+        }
+    } else {
+        virCommandAddArgFormat(cmd, "%d", virDomainDefGetVcpus(def));
+    }
 
     /* Memory */
     virCommandAddArg(cmd, "-m");
diff --git a/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.args b/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.args
new file mode 100644
index 0000000000..2d175a4178
--- /dev/null
+++ b/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.args
@@ -0,0 +1,9 @@
+/usr/sbin/bhyve \
+-c cpus=2,sockets=1,cores=2,threads=1 \
+-m 214 \
+-u \
+-H \
+-P \
+-s 0:0,hostbridge \
+-s 2:0,ahci,hd:/tmp/freebsd.img \
+-s 3:0,virtio-net,faketapdev,mac=52:54:00:b9:94:02 bhyve
diff --git a/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.ldargs b/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.ldargs
new file mode 100644
index 0000000000..32538b558e
--- /dev/null
+++ b/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.ldargs
@@ -0,0 +1,3 @@
+/usr/sbin/bhyveload \
+-m 214 \
+-d /tmp/freebsd.img bhyve
diff --git a/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.xml b/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.xml
new file mode 100644
index 0000000000..83c7d423c4
--- /dev/null
+++ b/tests/bhyvexml2argvdata/bhyvexml2argv-cputopology.xml
@@ -0,0 +1,26 @@
+<domain type='bhyve'>
+  <name>bhyve</name>
+  <uuid>df3be7e7-a104-11e3-aeb0-50e5492bd3dc</uuid>
+  <memory>219136</memory>
+  <vcpu>2</vcpu>
+  <cpu>
+    <topology sockets='1' cores='2' threads='1'/>
+  </cpu>
+  <os>
+    <type>hvm</type>
+  </os>
+  <devices>
+    <disk type='file'>
+      <driver name='file' type='raw'/>
+      <source file='/tmp/freebsd.img'/>
+      <target dev='hda' bus='sata'/>
+      <address type='drive' controller='0' bus='0' target='2' unit='0'/>
+    </disk>
+    <interface type='bridge'>
+      <mac address='52:54:00:b9:94:02'/>
+      <model type='virtio'/>
+      <source bridge="virbr0"/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+    </interface>
+  </devices>
+</domain>
diff --git a/tests/bhyvexml2argvtest.c b/tests/bhyvexml2argvtest.c
index b08b1675f3..1c12bfcff3 100644
--- a/tests/bhyvexml2argvtest.c
+++ b/tests/bhyvexml2argvtest.c
@@ -176,7 +176,8 @@ mymain(void)
     driver.grubcaps = BHYVE_GRUB_CAP_CONSDEV;
     driver.bhyvecaps = BHYVE_CAP_RTC_UTC | BHYVE_CAP_AHCI32SLOT | \
                        BHYVE_CAP_NET_E1000 | BHYVE_CAP_LPC_BOOTROM | \
-                       BHYVE_CAP_FBUF | BHYVE_CAP_XHCI;
+                       BHYVE_CAP_FBUF | BHYVE_CAP_XHCI | \
+                       BHYVE_CAP_CPUTOPOLOGY;
 
     DO_TEST("base");
     DO_TEST("wired");
@@ -207,6 +208,7 @@ mymain(void)
     DO_TEST("vnc-vgaconf-off");
     DO_TEST("vnc-vgaconf-io");
     DO_TEST("vnc-autoport");
+    DO_TEST("cputopology");
 
     /* Address allocation tests */
     DO_TEST("addr-single-sata-disk");
@@ -243,6 +245,9 @@ mymain(void)
     driver.bhyvecaps &= ~BHYVE_CAP_FBUF;
     DO_TEST_FAILURE("vnc");
 
+    driver.bhyvecaps &= ~BHYVE_CAP_CPUTOPOLOGY;
+    DO_TEST_FAILURE("cputopology");
+
     virObjectUnref(driver.caps);
     virObjectUnref(driver.xmlopt);
     virPortAllocatorRangeFree(driver.remotePorts);
--
2.17.0
On Mon, May 28, 2018 at 20:27:50 +0400, Roman Bogorodskiy wrote:
> Recently, bhyve gained support for specifying the guest CPU topology.

[...]

> diff --git a/src/bhyve/bhyve_command.c b/src/bhyve/bhyve_command.c
> index e3f7ded7db..b319518520 100644
> --- a/src/bhyve/bhyve_command.c
> +++ b/src/bhyve/bhyve_command.c
> @@ -467,7 +467,22 @@ virBhyveProcessBuildBhyveCmd(virConnectPtr conn,
>
>      /* CPUs */
>      virCommandAddArg(cmd, "-c");
> -    virCommandAddArgFormat(cmd, "%d", virDomainDefGetVcpus(def));
> +    if (def->cpu && def->cpu->sockets) {
> +        if ((bhyveDriverGetCaps(conn) & BHYVE_CAP_CPUTOPOLOGY) != 0) {
> +            virCommandAddArgFormat(cmd, "cpus=%d,sockets=%d,cores=%d,threads=%d",
> +                                   virDomainDefGetVcpus(def),
> +                                   def->cpu->sockets,
> +                                   def->cpu->cores,
> +                                   def->cpu->threads);

Note that libvirt's XML->def conversion does not validate that def->nvcpus
equals def->cpu->sockets * def->cpu->cores * def->cpu->threads.

This is a historic artefact, since qemu did not do it either; they only
started doing it recently. It might be worth adding that check to be
safe in the future.

> +        } else {
> +            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
> +                           _("Installed bhyve binary does not support "
> +                             "defining CPU topology"));
> +            goto error;
> +        }

The rest looks good to me, so ACK if you don't think the check for the
topology<->vcpu count is important enough.
Peter Krempa wrote:

> On Mon, May 28, 2018 at 20:27:50 +0400, Roman Bogorodskiy wrote:
> > Recently, bhyve gained support for specifying the guest CPU topology.
>
> [...]
>
> Note that libvirt's XML->def conversion does not validate that def->nvcpus
> equals def->cpu->sockets * def->cpu->cores * def->cpu->threads.
>
> This is a historic artefact, since qemu did not do it either; they only
> started doing it recently. It might be worth adding that check to be
> safe in the future.

Good point! Indeed, bhyve validates the CPU topology in the way you
described, so it makes sense to fail early instead of waiting for the
command to fail. I'll roll a v2.

> The rest looks good to me, so ACK if you don't think the check for the
> topology<->vcpu count is important enough.

Roman Bogorodskiy
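For context, below is a minimal sketch (not part of the posted patch, and
not necessarily what the v2 will look like) of the kind of vCPU-count vs.
topology check discussed above. The placement next to the
BHYVE_CAP_CPUTOPOLOGY handling in virBhyveProcessBuildBhyveCmd() and the
error message wording are illustrative assumptions; the helpers it uses
(virDomainDefGetVcpus(), virReportError(), the goto error cleanup path)
are the ones already visible in the hunk above.

    /* Illustrative sketch only -- not the code from the posted patch.
     * Check that the configured topology is consistent with the vCPU
     * count before building the "-c cpus=..." argument, so libvirt
     * reports a clear error instead of bhyve rejecting the command
     * line at start-up. */
    if (def->cpu && def->cpu->sockets) {
        if (virDomainDefGetVcpus(def) !=
            def->cpu->sockets * def->cpu->cores * def->cpu->threads) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("CPU topology does not match the vCPU count: "
                             "sockets * cores * threads must equal vcpus"));
            goto error;
        }

        /* ... existing BHYVE_CAP_CPUTOPOLOGY handling from the patch ... */
    }

An alternative would be to reject such configurations at XML validation
time rather than when building the command line; which of the two the v2
takes is up to the author.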