Remove the complex and unreliable code which inferred the node name
hierarchy only from data returned by 'query-named-block-nodes'. It
turns out that query-blockstats contains the full hierarchy of nodes
as perceived by qemu, so the inference code is not necessary.
In query-blockstats, the 'parent' object corresponds to the storage
behind a storage volume, and 'backing' corresponds to the lower level
of the backing chain. Since all of them have node names, this data can
easily be used to detect node names.
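
For illustration, a heavily trimmed sketch of the query-blockstats
reply shape this relies on (the 'stats' objects are omitted; the node
names mirror the test data added below):

    [
      {
        "device": "drive-virtio-disk0",
        "node-name": "#block187",
        "parent": { "node-name": "#block033" },
        "backing": {
          "node-name": "#block306",
          "parent": { "node-name": "#block220" }
        }
      }
    ]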
In addition to the code refactoring, the one remaining test case
needed to be fixed as well.
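
The returned hash table is now keyed by the 'device' string reported
by query-blockstats. A minimal consumer-side sketch (hypothetical
variable names; types and helpers as declared in qemu_block.h):

    virHashTablePtr table;
    qemuBlockNodeNameBackingChainDataPtr entry;

    if ((table = qemuBlockNodeNameGetBackingChain(namednodes, blockstats)) &&
        (entry = virHashLookup(table, "drive-virtio-disk0"))) {
        /* entry->nodeformat and entry->nodestorage hold the node names
         * of the top image; entry->backing descends the backing chain */
    }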
---
src/qemu/qemu_block.c | 290 ++++++++-------------
src/qemu/qemu_block.h | 10 +-
.../qemumonitorjson-nodename-basic-blockstats.json | 166 ++++++++++++
...qemumonitorjson-nodename-basic-named-nodes.json | 18 +-
.../qemumonitorjson-nodename-basic.result | 12 +-
tests/qemumonitorjsontest.c | 22 +-
6 files changed, 313 insertions(+), 205 deletions(-)
create mode 100644 tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-blockstats.json
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index dc96a43e1..bca6965fa 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -48,20 +48,15 @@ qemuBlockNamedNodesArrayToHash(size_t pos ATTRIBUTE_UNUSED,
static void
qemuBlockNodeNameBackingChainDataFree(qemuBlockNodeNameBackingChainDataPtr data)
{
- size_t i;
-
if (!data)
return;
- for (i = 0; i < data->nelems; i++)
- virJSONValueFree(data->elems[i]);
-
VIR_FREE(data->nodeformat);
VIR_FREE(data->nodestorage);
- VIR_FREE(data->nodebacking);
VIR_FREE(data->qemufilename);
- VIR_FREE(data->backingstore);
+
+ qemuBlockNodeNameBackingChainDataFree(data->backing);
VIR_FREE(data);
}
@@ -75,55 +70,10 @@ qemuBlockNodeNameBackingChainDataHashEntryFree(void *opaque,
}
-struct qemuBlockNodeNameGetBackingChainData {
- virHashTablePtr table;
- qemuBlockNodeNameBackingChainDataPtr *entries;
- size_t nentries;
-};
-
-
-static int
-qemuBlockNodeNameDetectProcessByFilename(size_t pos ATTRIBUTE_UNUSED,
- virJSONValuePtr item,
- void *opaque)
-{
- struct qemuBlockNodeNameGetBackingChainData *data = opaque;
- qemuBlockNodeNameBackingChainDataPtr entry;
- const char *file;
-
- if (!(file = virJSONValueObjectGetString(item, "file")))
- return 1;
-
- if (!(entry = virHashLookup(data->table, file))) {
- if (VIR_ALLOC(entry) < 0)
- return -1;
-
- if (VIR_APPEND_ELEMENT_COPY(data->entries, data->nentries, entry) < 0) {
- VIR_FREE(entry);
- return -1;
- }
-
- if (VIR_STRDUP(entry->qemufilename, file) < 0)
- return -1;
-
- if (virHashAddEntry(data->table, file, entry) < 0)
- return -1;
- }
-
- if (VIR_APPEND_ELEMENT(entry->elems, entry->nelems, item) < 0)
- return -1;
-
- return 0;
-}
-
-
-static const char *qemuBlockDriversFormat[] = {
- "qcow2", "raw", "qcow", "luks", "qed", "bochs", "cloop", "dmg", "parallels",
- "vdi", "vhdx", "vmdk", "vpc", "vvfat", NULL};
-static const char *qemuBlockDriversStorage[] = {
- "file", "iscsi", "nbd", "host_cdrom", "host_device", "ftp", "ftps",
- "gluster", "http", "https", "nfs", "rbd", "sheepdog", "ssh", "tftp", NULL};
-
+/* list of driver names of layers that qemu automatically adds into the
+ * backing chain */
+static const char *qemuBlockDriversBlockjob[] = {
+ "mirror_top", "commit_top", NULL };
static bool
qemuBlockDriverMatch(const char *drvname,
@@ -140,86 +90,110 @@ qemuBlockDriverMatch(const char *drvname,
}
+struct qemuBlockNodeNameGetBackingChainData {
+ virHashTablePtr nodenamestable;
+ virHashTablePtr disks;
+};
+
+
static int
-qemuBlockNodeNameDetectProcessExtract(qemuBlockNodeNameBackingChainDataPtr data)
+qemuBlockNodeNameGetBackingChainBacking(virJSONValuePtr next,
+ virHashTablePtr nodenamestable,
+ qemuBlockNodeNameBackingChainDataPtr *nodenamedata)
{
- const char *drv;
- const char *nodename;
- const char *backingstore;
- size_t i;
+ qemuBlockNodeNameBackingChainDataPtr data = NULL;
+ qemuBlockNodeNameBackingChainDataPtr backingdata = NULL;
+ virJSONValuePtr backing = virJSONValueObjectGetObject(next, "backing");
+ virJSONValuePtr parent = virJSONValueObjectGetObject(next, "parent");
+ virJSONValuePtr parentnodedata;
+ virJSONValuePtr nodedata;
+ const char *nodename = virJSONValueObjectGetString(next, "node-name");
+ const char *drvname;
+ const char *parentnodename = NULL;
+ const char *filename = NULL;
+ int ret = -1;
- /* Since the only way to construct the backing chain is to look up the files
- * by file name, if two disks share a backing image we can't know which node
- * belongs to which backing chain. Refuse to detect such chains. */
- if (data->nelems > 2)
+ if (!nodename)
return 0;
- for (i = 0; i < data->nelems; i++) {
- drv = virJSONValueObjectGetString(data->elems[i], "drv");
- nodename = virJSONValueObjectGetString(data->elems[i], "node-name");
- backingstore = virJSONValueObjectGetString(data->elems[i], "backing_file");
+ if ((nodedata = virHashLookup(nodenamestable, nodename)) &&
+ (drvname = virJSONValueObjectGetString(nodedata, "drv"))) {
+
+ /* qemu 2.9 reports layers in the backing chain which don't correspond
+ * to files. skip them */
+ if (qemuBlockDriverMatch(drvname, qemuBlockDriversBlockjob)) {
+ if (backing) {
+ return qemuBlockNodeNameGetBackingChainBacking(backing,
+ nodenamestable,
+ nodenamedata);
+ } else {
+ return 0;
+ }
+ }
+ }
- if (!drv || !nodename)
- continue;
+ if (parent &&
+ (parentnodename = virJSONValueObjectGetString(parent, "node-name"))) {
+ if ((parentnodedata = virHashLookup(nodenamestable, parentnodename)))
+ filename = virJSONValueObjectGetString(parentnodedata, "file");
+ }
- if (qemuBlockDriverMatch(drv, qemuBlockDriversFormat)) {
- if (data->nodeformat)
- continue;
+ if (VIR_ALLOC(data) < 0)
+ goto cleanup;
- if (VIR_STRDUP(data->nodeformat, nodename) < 0)
- return -1;
+ if (VIR_STRDUP(data->nodeformat, nodename) < 0 ||
+ VIR_STRDUP(data->nodestorage, parentnodename) < 0 ||
+ VIR_STRDUP(data->qemufilename, filename) < 0)
+ goto cleanup;
- /* extract the backing store file name for the protocol layer */
- if (VIR_STRDUP(data->backingstore, backingstore) < 0)
- return -1;
- } else if (qemuBlockDriverMatch(drv, qemuBlockDriversStorage)) {
- if (data->nodestorage)
- continue;
+ if (backing &&
+ qemuBlockNodeNameGetBackingChainBacking(backing, nodenamestable,
+ &backingdata) < 0)
+ goto cleanup;
- if (VIR_STRDUP(data->nodestorage, nodename) < 0)
- return -1;
- }
- }
+ VIR_STEAL_PTR(data->backing, backingdata);
+ VIR_STEAL_PTR(*nodenamedata, data);
- return 0;
+ ret = 0;
+
+ cleanup:
+ qemuBlockNodeNameBackingChainDataFree(data);
+ return ret;
}
static int
-qemuBlockNodeNameDetectProcessLinkBacking(qemuBlockNodeNameBackingChainDataPtr data,
- virHashTablePtr table)
+qemuBlockNodeNameGetBackingChainDisk(size_t pos ATTRIBUTE_UNUSED,
+ virJSONValuePtr item,
+ void *opaque)
{
- qemuBlockNodeNameBackingChainDataPtr backing;
-
- if (!data->backingstore)
- return 0;
-
- if (!(backing = virHashLookup(table, data->backingstore)))
- return 0;
-
- if (VIR_STRDUP(data->nodebacking, backing->nodeformat) < 0)
- return -1;
+ struct qemuBlockNodeNameGetBackingChainData *data = opaque;
+ const char *device = virJSONValueObjectGetString(item, "device");
+ qemuBlockNodeNameBackingChainDataPtr devicedata = NULL;
+ int ret = -1;
- return 0;
-}
+ if (qemuBlockNodeNameGetBackingChainBacking(item, data->nodenamestable,
+ &devicedata) < 0)
+ goto cleanup;
+ if (devicedata &&
+ virHashAddEntry(data->disks, device, devicedata) < 0)
+ goto cleanup;
-static void
-qemuBlockNodeNameGetBackingChainDataClearLookup(qemuBlockNodeNameBackingChainDataPtr data)
-{
- size_t i;
+ devicedata = NULL;
+ ret = 1; /* we don't really want to steal @item */
- for (i = 0; i < data->nelems; i++)
- virJSONValueFree(data->elems[i]);
+ cleanup:
+ qemuBlockNodeNameBackingChainDataFree(devicedata);
- VIR_FREE(data->elems);
- data->nelems = 0;
+ return ret;
}
/**
* qemuBlockNodeNameGetBackingChain:
- * @json: JSON array of data returned from 'query-named-block-nodes'
+ * @namednodes: JSON array of data returned from 'query-named-block-nodes'
+ * @blockstats: JSON array of data returned from 'query-blockstats'
*
* Tries to reconstruct the backing chain from @json to allow detection of
* node names that were auto-assigned by qemu. This is a best-effort operation
@@ -230,69 +204,40 @@ qemuBlockNodeNameGetBackingChainDataClearLookup(qemuBlockNodeNameBackingChainDat
* Returns a hash table on success and NULL on failure.
*/
virHashTablePtr
-qemuBlockNodeNameGetBackingChain(virJSONValuePtr json)
+qemuBlockNodeNameGetBackingChain(virJSONValuePtr namednodes,
+ virJSONValuePtr blockstats)
{
struct qemuBlockNodeNameGetBackingChainData data;
- virHashTablePtr nodetable = NULL;
+ virHashTablePtr namednodestable = NULL;
+ virHashTablePtr disks = NULL;
virHashTablePtr ret = NULL;
- size_t i;
memset(&data, 0, sizeof(data));
- /* hash table keeps the entries accessible by the 'file' in qemu */
- if (!(data.table = virHashCreate(50, NULL)))
+ if (!(namednodestable = virHashCreate(50, virJSONValueHashFree)))
goto cleanup;
- /* first group the named entries by the 'file' field */
- if (virJSONValueArrayForeachSteal(json,
- qemuBlockNodeNameDetectProcessByFilename,
- &data) < 0)
+ if (virJSONValueArrayForeachSteal(namednodes,
+ qemuBlockNamedNodesArrayToHash,
+ namednodestable) < 0)
goto cleanup;
- /* extract the node names for the format and storage layer */
- for (i = 0; i < data.nentries; i++) {
- if (qemuBlockNodeNameDetectProcessExtract(data.entries[i]) < 0)
- goto cleanup;
- }
-
- /* extract the node name for the backing file */
- for (i = 0; i < data.nentries; i++) {
- if (qemuBlockNodeNameDetectProcessLinkBacking(data.entries[i],
- data.table) < 0)
- goto cleanup;
- }
-
- /* clear JSON data necessary only for the lookup procedure */
- for (i = 0; i < data.nentries; i++)
- qemuBlockNodeNameGetBackingChainDataClearLookup(data.entries[i]);
-
- /* create hash table hashed by the format node name */
- if (!(nodetable = virHashCreate(50,
- qemuBlockNodeNameBackingChainDataHashEntryFree)))
+ if (!(disks = virHashCreate(50, qemuBlockNodeNameBackingChainDataHashEntryFree)))
goto cleanup;
- /* fill the entries */
- for (i = 0; i < data.nentries; i++) {
- if (!data.entries[i]->nodeformat)
- continue;
-
- if (virHashAddEntry(nodetable, data.entries[i]->nodeformat,
- data.entries[i]) < 0)
- goto cleanup;
+ data.nodenamestable = namednodestable;
+ data.disks = disks;
- /* hash table steals the entry and then frees it by itself */
- data.entries[i] = NULL;
- }
+ if (virJSONValueArrayForeachSteal(blockstats,
+ qemuBlockNodeNameGetBackingChainDisk,
+ &data) < 0)
+ goto cleanup;
- VIR_STEAL_PTR(ret, nodetable);
+ VIR_STEAL_PTR(ret, disks);
cleanup:
- virHashFree(data.table);
- virHashFree(nodetable);
- for (i = 0; i < data.nentries; i++)
- qemuBlockNodeNameBackingChainDataFree(data.entries[i]);
-
- VIR_FREE(data.entries);
+ virHashFree(namednodestable);
+ virHashFree(disks);
return ret;
}
@@ -314,20 +259,19 @@ qemuBlockDiskClearDetectedNodes(virDomainDiskDefPtr disk)
static int
qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk,
- const char *parentnode,
- virHashTablePtr table)
+ virHashTablePtr disktable)
{
qemuBlockNodeNameBackingChainDataPtr entry = NULL;
virStorageSourcePtr src = disk->src;
- /* don't attempt the detection if the top level already has node names */
- if (!parentnode || src->nodeformat || src->nodestorage)
+ if (!(entry = virHashLookup(disktable, disk->info.alias)))
return 0;
- while (src && parentnode) {
- if (!(entry = virHashLookup(table, parentnode)))
- break;
+ /* don't attempt the detection if the top level already has node names */
+ if (src->nodeformat || src->nodestorage)
+ return 0;
+ while (src && entry) {
if (src->nodeformat || src->nodestorage) {
if (STRNEQ_NULLABLE(src->nodeformat, entry->nodeformat) ||
STRNEQ_NULLABLE(src->nodestorage, entry->nodestorage))
@@ -340,7 +284,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk,
goto error;
}
- parentnode = entry->nodebacking;
+ entry = entry->backing;
src = src->backingStore;
}
@@ -359,10 +303,9 @@ qemuBlockNodeNamesDetect(virQEMUDriverPtr driver,
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr disktable = NULL;
- virHashTablePtr nodenametable = NULL;
virJSONValuePtr data = NULL;
+ virJSONValuePtr blockstats = NULL;
virDomainDiskDefPtr disk;
- struct qemuDomainDiskInfo *info;
size_t i;
int ret = -1;
@@ -372,22 +315,19 @@ qemuBlockNodeNamesDetect(virQEMUDriverPtr driver,
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
- disktable = qemuMonitorGetBlockInfo(qemuDomainGetMonitor(vm));
data = qemuMonitorQueryNamedBlockNodes(qemuDomainGetMonitor(vm));
+ blockstats = qemuMonitorQueryBlockstats(qemuDomainGetMonitor(vm));
- if (qemuDomainObjExitMonitor(driver, vm) < 0 || !data || !disktable)
+ if (qemuDomainObjExitMonitor(driver, vm) < 0 || !data || !blockstats)
goto cleanup;
- if (!(nodenametable = qemuBlockNodeNameGetBackingChain(data)))
+ if (!(disktable = qemuBlockNodeNameGetBackingChain(data, blockstats)))
goto cleanup;
for (i = 0; i < vm->def->ndisks; i++) {
disk = vm->def->disks[i];
- if (!(info = virHashLookup(disktable, disk->info.alias)))
- continue;
-
- if (qemuBlockDiskDetectNodes(disk, info->nodename, nodenametable) < 0)
+ if (qemuBlockDiskDetectNodes(disk, disktable) < 0)
goto cleanup;
}
@@ -395,7 +335,7 @@ qemuBlockNodeNamesDetect(virQEMUDriverPtr driver,
cleanup:
virJSONValueFree(data);
- virHashFree(nodenametable);
+ virJSONValueFree(blockstats);
virHashFree(disktable);
return ret;
diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h
index 17dec799f..5d21057a7 100644
--- a/src/qemu/qemu_block.h
+++ b/src/qemu/qemu_block.h
@@ -31,19 +31,15 @@ typedef struct qemuBlockNodeNameBackingChainData qemuBlockNodeNameBackingChainDa
typedef qemuBlockNodeNameBackingChainData *qemuBlockNodeNameBackingChainDataPtr;
struct qemuBlockNodeNameBackingChainData {
char *qemufilename; /* name of the image from qemu */
- char *backingstore;
char *nodeformat; /* node name of the format layer */
char *nodestorage; /* node name of the storage backing the format node */
- char *nodebacking; /* node name of the backing file format layer */
-
- /* data necessary for detection of the node names from qemu */
- virJSONValuePtr *elems;
- size_t nelems;
+ qemuBlockNodeNameBackingChainDataPtr backing;
};
virHashTablePtr
-qemuBlockNodeNameGetBackingChain(virJSONValuePtr data);
+qemuBlockNodeNameGetBackingChain(virJSONValuePtr namednodesdata,
+ virJSONValuePtr blockstats);
int
qemuBlockNodeNamesDetect(virQEMUDriverPtr driver,
diff --git a/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-blockstats.json b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-blockstats.json
new file mode 100644
index 000000000..c3752b4cf
--- /dev/null
+++ b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-blockstats.json
@@ -0,0 +1,166 @@
+[
+ {
+ "device": "drive-virtio-disk0",
+ "parent": {
+ "stats": {
+ "flush_total_time_ns": 0,
+ "wr_highest_offset": 32899072,
+ "wr_total_time_ns": 0,
+ "failed_wr_operations": 0,
+ "failed_rd_operations": 0,
+ "wr_merged": 0,
+ "wr_bytes": 0,
+ "timed_stats": [
+
+ ],
+ "failed_flush_operations": 0,
+ "account_invalid": false,
+ "rd_total_time_ns": 0,
+ "flush_operations": 0,
+ "wr_operations": 0,
+ "rd_merged": 0,
+ "rd_bytes": 0,
+ "invalid_flush_operations": 0,
+ "account_failed": false,
+ "rd_operations": 0,
+ "invalid_wr_operations": 0,
+ "invalid_rd_operations": 0
+ },
+ "node-name": "#block033"
+ },
+ "stats": {
+ "flush_total_time_ns": 452246313,
+ "wr_highest_offset": 8072282112,
+ "wr_total_time_ns": 4803102521,
+ "failed_wr_operations": 0,
+ "failed_rd_operations": 0,
+ "wr_merged": 8,
+ "wr_bytes": 6517248,
+ "timed_stats": [
+
+ ],
+ "failed_flush_operations": 0,
+ "account_invalid": true,
+ "rd_total_time_ns": 11065169148,
+ "flush_operations": 10,
+ "wr_operations": 129,
+ "rd_merged": 77,
+ "rd_bytes": 76399104,
+ "invalid_flush_operations": 0,
+ "account_failed": true,
+ "idle_time_ns": 22663656304,
+ "rd_operations": 4038,
+ "invalid_wr_operations": 0,
+ "invalid_rd_operations": 0
+ },
+ "backing": {
+ "parent": {
+ "stats": {
+ "flush_total_time_ns": 0,
+ "wr_highest_offset": 0,
+ "wr_total_time_ns": 0,
+ "failed_wr_operations": 0,
+ "failed_rd_operations": 0,
+ "wr_merged": 0,
+ "wr_bytes": 0,
+ "timed_stats": [
+
+ ],
+ "failed_flush_operations": 0,
+ "account_invalid": false,
+ "rd_total_time_ns": 0,
+ "flush_operations": 0,
+ "wr_operations": 0,
+ "rd_merged": 0,
+ "rd_bytes": 0,
+ "invalid_flush_operations": 0,
+ "account_failed": false,
+ "rd_operations": 0,
+ "invalid_wr_operations": 0,
+ "invalid_rd_operations": 0
+ },
+ "node-name": "#block220"
+ },
+ "stats": {
+ "flush_total_time_ns": 0,
+ "wr_highest_offset": 0,
+ "wr_total_time_ns": 0,
+ "failed_wr_operations": 0,
+ "failed_rd_operations": 0,
+ "wr_merged": 0,
+ "wr_bytes": 0,
+ "timed_stats": [
+
+ ],
+ "failed_flush_operations": 0,
+ "account_invalid": false,
+ "rd_total_time_ns": 0,
+ "flush_operations": 0,
+ "wr_operations": 0,
+ "rd_merged": 0,
+ "rd_bytes": 0,
+ "invalid_flush_operations": 0,
+ "account_failed": false,
+ "rd_operations": 0,
+ "invalid_wr_operations": 0,
+ "invalid_rd_operations": 0
+ },
+ "backing": {
+ "parent": {
+ "stats": {
+ "flush_total_time_ns": 0,
+ "wr_highest_offset": 0,
+ "wr_total_time_ns": 0,
+ "failed_wr_operations": 0,
+ "failed_rd_operations": 0,
+ "wr_merged": 0,
+ "wr_bytes": 0,
+ "timed_stats": [
+
+ ],
+ "failed_flush_operations": 0,
+ "account_invalid": false,
+ "rd_total_time_ns": 0,
+ "flush_operations": 0,
+ "wr_operations": 0,
+ "rd_merged": 0,
+ "rd_bytes": 0,
+ "invalid_flush_operations": 0,
+ "account_failed": false,
+ "rd_operations": 0,
+ "invalid_wr_operations": 0,
+ "invalid_rd_operations": 0
+ },
+ "node-name": "#block481"
+ },
+ "stats": {
+ "flush_total_time_ns": 0,
+ "wr_highest_offset": 0,
+ "wr_total_time_ns": 0,
+ "failed_wr_operations": 0,
+ "failed_rd_operations": 0,
+ "wr_merged": 0,
+ "wr_bytes": 0,
+ "timed_stats": [
+
+ ],
+ "failed_flush_operations": 0,
+ "account_invalid": false,
+ "rd_total_time_ns": 0,
+ "flush_operations": 0,
+ "wr_operations": 0,
+ "rd_merged": 0,
+ "rd_bytes": 0,
+ "invalid_flush_operations": 0,
+ "account_failed": false,
+ "rd_operations": 0,
+ "invalid_wr_operations": 0,
+ "invalid_rd_operations": 0
+ },
+ "node-name": "#block558"
+ },
+ "node-name": "#block306"
+ },
+ "node-name": "#block187"
+ }
+]
diff --git a/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-named-nodes.json b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-named-nodes.json
index fe2f32176..ce8fdae70 100644
--- a/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-named-nodes.json
+++ b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-named-nodes.json
@@ -21,7 +21,7 @@
},
"iops_wr": 0,
"ro": true,
- "node-name": "#block567",
+ "node-name": "#block558",
"backing_file_depth": 0,
"drv": "qcow2",
"iops": 0,
@@ -50,7 +50,7 @@
},
"iops_wr": 0,
"ro": true,
- "node-name": "#block424",
+ "node-name": "#block481",
"backing_file_depth": 0,
"drv": "file",
"iops": 0,
@@ -109,7 +109,7 @@
},
"iops_wr": 0,
"ro": true,
- "node-name": "#block331",
+ "node-name": "#block306",
"backing_file_depth": 1,
"drv": "qcow2",
"iops": 0,
@@ -139,7 +139,7 @@
},
"iops_wr": 0,
"ro": true,
- "node-name": "#block281",
+ "node-name": "#block220",
"backing_file_depth": 0,
"drv": "file",
"iops": 0,
@@ -202,7 +202,7 @@
"filename": "/var/lib/libvirt/images/rhel7.3.1483545313",
"cluster-size": 65536,
"format": "qcow2",
- "actual-size": 32968704,
+ "actual-size": 33165312,
"format-specific": {
"type": "qcow2",
"data": {
@@ -218,7 +218,7 @@
},
"iops_wr": 0,
"ro": false,
- "node-name": "#block118",
+ "node-name": "#block187",
"backing_file_depth": 2,
"drv": "qcow2",
"iops": 0,
@@ -240,15 +240,15 @@
"iops_rd": 0,
"detect_zeroes": "off",
"image": {
- "virtual-size": 33030144,
+ "virtual-size": 33226752,
"filename": "/var/lib/libvirt/images/rhel7.3.1483545313",
"format": "file",
- "actual-size": 32968704,
+ "actual-size": 33165312,
"dirty-flag": false
},
"iops_wr": 0,
"ro": false,
- "node-name": "#block078",
+ "node-name": "#block033",
"backing_file_depth": 0,
"drv": "file",
"iops": 0,
diff --git a/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic.result b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic.result
index 992fc5165..09e0b36ac 100644
--- a/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic.result
+++ b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic.result
@@ -1,9 +1,9 @@
filename : '/var/lib/libvirt/images/rhel7.3.1483545313'
-format node : '#block118'
-storage node: '#block078'
+format node : '#block187'
+storage node: '#block033'
filename : '/var/lib/libvirt/images/rhel7.3.1483536402'
- format node : '#block331'
- storage node: '#block281'
+ format node : '#block306'
+ storage node: '#block220'
filename : '/var/lib/libvirt/images/rhel7.3.qcow2'
- format node : '#block567'
- storage node: '#block424'
+ format node : '#block558'
+ storage node: '#block481'
diff --git a/tests/qemumonitorjsontest.c b/tests/qemumonitorjsontest.c
index c3e86419d..cb7a7dbdb 100644
--- a/tests/qemumonitorjsontest.c
+++ b/tests/qemumonitorjsontest.c
@@ -2704,18 +2704,17 @@ struct testBlockNodeNameDetectData {
static void
testBlockNodeNameDetectFormat(virBufferPtr buf,
- const char *basenode,
+ const char *diskalias,
virHashTablePtr nodedata)
{
qemuBlockNodeNameBackingChainDataPtr entry = NULL;
- const char *node = basenode;
virBufferSetIndent(buf, 0);
- while (node) {
- if (!(entry = virHashLookup(nodedata, node)))
- break;
+ if (!(entry = virHashLookup(nodedata, diskalias)))
+ return;
+ while (entry) {
virBufferAsprintf(buf, "filename : '%s'\n", entry->qemufilename);
virBufferAsprintf(buf, "format node : '%s'\n",
NULLSTR(entry->nodeformat));
@@ -2724,7 +2723,7 @@ testBlockNodeNameDetectFormat(virBufferPtr buf,
virBufferAdjustIndent(buf, 2);
- node = entry->nodebacking;
+ entry = entry->backing;
}
virBufferSetIndent(buf, 0);
@@ -2742,6 +2741,7 @@ testBlockNodeNameDetect(const void *opaque)
char **nodenames = NULL;
char **next;
virJSONValuePtr namedNodesJson = NULL;
+ virJSONValuePtr blockstatsJson = NULL;
virHashTablePtr nodedata = NULL;
virBuffer buf = VIR_BUFFER_INITIALIZER;
int ret = -1;
@@ -2757,7 +2757,12 @@ testBlockNodeNameDetect(const void *opaque)
"-named-nodes.json", NULL)))
goto cleanup;
- if (!(nodedata = qemuBlockNodeNameGetBackingChain(namedNodesJson)))
+ if (!(blockstatsJson = virTestLoadFileJSON(pathprefix, data->name,
+ "-blockstats.json", NULL)))
+ goto cleanup;
+
+ if (!(nodedata = qemuBlockNodeNameGetBackingChain(namedNodesJson,
+ blockstatsJson)))
goto cleanup;
for (next = nodenames; *next; next++)
@@ -2781,6 +2786,7 @@ testBlockNodeNameDetect(const void *opaque)
virHashFree(nodedata);
virStringListFree(nodenames);
virJSONValueFree(namedNodesJson);
+ virJSONValueFree(blockstatsJson);
return ret;
}
@@ -2929,7 +2935,7 @@ mymain(void)
ret = -1; \
} while (0)
- DO_TEST_BLOCK_NODE_DETECT("basic", "#block118");
+ DO_TEST_BLOCK_NODE_DETECT("basic", "drive-virtio-disk0");
/* DO_TEST_BLOCK_NODE_DETECT("same-backing", "#block170,#block574"); */
/* DO_TEST_BLOCK_NODE_DETECT("relative", "#block153,#block1177"); */
/* DO_TEST_BLOCK_NODE_DETECT("gluster", "#block1008"); */
--
2.13.2
On 07/26/2017 05:00 AM, Peter Krempa wrote:
> Remove the complex and unreliable code which inferred the node name
> hierarchy only from data returned by 'query-named-block-nodes'. It
> turns out that query-blockstats contains the full hierarchy of nodes
> as perceived by qemu, so the inference code is not necessary.
>
> In query-blockstats, the 'parent' object corresponds to the storage
> behind a storage volume, and 'backing' corresponds to the lower level
> of the backing chain. Since all of them have node names, this data can
> easily be used to detect node names.
>
> In addition to the code refactoring, the one remaining test case
> needed to be fixed as well.

The diff is hard to read given how much changed; it's easier to just
focus on the code additions to see if the new stuff makes sense,
ignoring what used to be there.

> ---
>  src/qemu/qemu_block.c                              | 290 ++++++++-------------
>  src/qemu/qemu_block.h                              |  10 +-
>  .../qemumonitorjson-nodename-basic-blockstats.json | 166 ++++++++++++
>  ...qemumonitorjson-nodename-basic-named-nodes.json |  18 +-
>  .../qemumonitorjson-nodename-basic.result          |  12 +-
>  tests/qemumonitorjsontest.c                        |  22 +-
>  6 files changed, 313 insertions(+), 205 deletions(-)
>  create mode 100644 tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-blockstats.json
>
> -
> +/* list of driver names of layers that qemu automatically adds into the
> + * backing chain */
> +static const char *qemuBlockDriversBlockjob[] = {
> +    "mirror_top", "commit_top", NULL };

This list may change over time; also, qemu 2.10 will hide these implicit
nodes (or did 2.9 expose them, so we are stuck dealing with them?)

> +
> +    /* qemu 2.9 reports layers in the backing chain which don't correspond
> +     * to files. skip them */
> +    if (qemuBlockDriverMatch(drvname, qemuBlockDriversBlockjob)) {

And that answers my question.

> +qemuBlockNodeNameGetBackingChainDisk(size_t pos ATTRIBUTE_UNUSED,
> +                                     virJSONValuePtr item,
> +                                     void *opaque)
> +    if (qemuBlockNodeNameGetBackingChainBacking(item, data->nodenamestable,
> +                                                &devicedata) < 0)

Sounds like we're stuttering on Backing; but I guess it makes sense (we
are doing pieces of 'GetBackingChain', with one piece getting the disk
and the other getting the next backing level).

>  /**
>   * qemuBlockNodeNameGetBackingChain:
> - * @json: JSON array of data returned from 'query-named-block-nodes'
> + * @namednodes: JSON array of data returned from 'query-named-block-nodes'
> + * @blockstats: JSON array of data returned from 'query-blockstats'

So we really have to query two pieces of information, then thread them
together (but at least the threading is easy to do, now that qemu
auto-names every node); definitely better than our old code of
reconstructing the threading by filename guesses. But it also goes to
show why Kevin is working on a new query-block-something for 2.11 that
will be less painful; so while we'll have to keep this cruft for as long
as we support 2.9/2.10, we'll want to give feedback to Kevin's
improvements that will let us rebuild things in less time. Or even
better, we reach the point where we assign all node names ourselves, and
don't even have to do reconstructive queries. But that's for down the
road; now back to reviewing this patch.
> +++ b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-named-nodes.json
> @@ -21,7 +21,7 @@
>        },
>        "iops_wr": 0,
>        "ro": true,
> -      "node-name": "#block567",
> +      "node-name": "#block558",

Trivia: qemu randomizes the last two digits (so you HAVE to query rather
than making hard-coded assumptions about what node name will be
auto-assigned); but the leading digits are merely a counter of how many
nodes qemu previously had to name (this node thus had 5 preceding nodes)
- so having the same #block5XX means you created your new trace using
the same steps of node creation as the old trace ;)

> -    DO_TEST_BLOCK_NODE_DETECT("basic", "#block118");
> +    DO_TEST_BLOCK_NODE_DETECT("basic", "drive-virtio-disk0");

The real payout of your new scheme: you get to start your query from a
block device name (under libvirt control) rather than from an
intermediate generated node name!

I didn't spot anything blatantly wrong, and the new code DOES seem less
convoluted (yeah, we have to bounce between two arrays to piece things
together, but we aren't reconstructing threading by hand), and, as the
commit message says, you're now able to handle a case that previously
failed without the use of query-blockstats. I'd still like to test
things once I've gone over the whole series, but if I get through my
review to 24/24 without any further red flags, you can add:

Reviewed-by: Eric Blake <eblake@redhat.com>

-- 
Eric Blake, Principal Software Engineer
Red Hat, Inc.           +1-919-301-3266
Virtualization:  qemu.org | libvirt.org
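
To make the trivia above concrete, here is a tiny illustrative sketch
of the naming scheme Eric describes (a counter of previously named
nodes followed by two random digits). It mimics the described
behaviour only and is not qemu's actual generator code:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    /* "#block" + <count of nodes named so far> + <two random digits>;
     * e.g. counter 5 with random 58 yields "#block558". */
    static char *
    exampleAutoNodeName(void)
    {
        static unsigned counter;
        char *name = NULL;

        if (asprintf(&name, "#block%u%02d", counter++, rand() % 100) < 0)
            return NULL;
        return name;
    }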
On Wed, Jul 26, 2017 at 16:21:25 -0500, Eric Blake wrote:
> On 07/26/2017 05:00 AM, Peter Krempa wrote:
> > Remove the complex and unreliable code which inferred the node name
> > hierarchy only from data returned by 'query-named-block-nodes'. It
> > turns out that query-blockstats contains the full hierarchy of nodes
> > as perceived by qemu, so the inference code is not necessary.
> >
> > In query-blockstats, the 'parent' object corresponds to the storage
> > behind a storage volume, and 'backing' corresponds to the lower level
> > of the backing chain. Since all of them have node names, this data
> > can easily be used to detect node names.
> >
> > In addition to the code refactoring, the one remaining test case
> > needed to be fixed as well.
>
> The diff is hard to read given how much changed; it's easier to just
> focus on the code additions to see if the new stuff makes sense,
> ignoring what used to be there.
>
> > ---
> >  src/qemu/qemu_block.c                              | 290 ++++++++-------------
> >  src/qemu/qemu_block.h                              |  10 +-
> >  .../qemumonitorjson-nodename-basic-blockstats.json | 166 ++++++++++++
> >  ...qemumonitorjson-nodename-basic-named-nodes.json |  18 +-
> >  .../qemumonitorjson-nodename-basic.result          |  12 +-
> >  tests/qemumonitorjsontest.c                        |  22 +-
> >  6 files changed, 313 insertions(+), 205 deletions(-)
> >  create mode 100644 tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-blockstats.json

[...]

> So we really have to query two pieces of information, then thread them
> together (but at least the threading is easy to do, now that qemu
> auto-names every node); definitely better than our old code of
> reconstructing the threading by filename guesses. But it also goes to
> show why Kevin is working on a new query-block-something for 2.11 that
> will be less painful; so while we'll have to keep this cruft for as long
> as we support 2.9/2.10, we'll want to give feedback to Kevin's
> improvements that will let us rebuild things in less time. Or even
> better, we reach the point where we assign all node names ourselves, and
> don't even have to do reconstructive queries. But that's for down the
> road; now back to reviewing this patch.

Yes, that is the final plan and I sure hope to make it happen soon :).
With assigning names manually we will have to keep this code for
backward compatibility with VMs started on older versions without
assigned node names. Also, this code allows the block threshold event
for qemu 2.5 - 2.9. I take 2.9 as the first version where assigning
names will be feasible due to the fixed 'blockdev-add'.

> > +++ b/tests/qemumonitorjsondata/qemumonitorjson-nodename-basic-named-nodes.json
> > @@ -21,7 +21,7 @@
> >        },
> >        "iops_wr": 0,
> >        "ro": true,
> > -      "node-name": "#block567",
> > +      "node-name": "#block558",
>
> Trivia: qemu randomizes the last two digits (so you HAVE to query rather
> than making hard-coded assumptions about what node name will be
> auto-assigned); but the leading digits are merely a counter of how many
> nodes qemu previously had to name (this node thus had 5 preceding nodes)
> - so having the same #block5XX means you created your new trace using
> the same steps of node creation as the old trace ;)

I read the commit adding the "object name generator" but did not quite
get this detail from reading it. Thanks for the enlightenment :)