[RFC PATCH v3] contrib/plugins: control flow plugin

Posted by Alex Bennée 1 month, 3 weeks ago
This is a simple control flow tracking plugin that uses the latest
inline and conditional operations to detect and track control flow
changes. It is currently an exercise in seeing how useful the changes
are.

Based-on: <20240312075428.244210-1-pierrick.bouvier@linaro.org>
Cc: Gustavo Romero <gustavo.romero@linaro.org>
Cc: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20240311153432.1395190-1-alex.bennee@linaro.org>

---
v2
  - only need a single callback
  - drop need for INSN_WIDTH
  - still don't understand the early exits

v3
  - move initial STORE ops to the first instruction to avoid confusion
  with the conditional callback at the start
  - filter out non-branches before processing
  - fix off-by-one with accounting
  - display "sync fault" or "branch" instead of raw numbers
---
 contrib/plugins/cflow.c  | 364 +++++++++++++++++++++++++++++++++++++++
 contrib/plugins/Makefile |   1 +
 2 files changed, 365 insertions(+)
 create mode 100644 contrib/plugins/cflow.c

diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c
new file mode 100644
index 0000000000..e5b0d4bbbd
--- /dev/null
+++ b/contrib/plugins/cflow.c
@@ -0,0 +1,364 @@
+/*
+ * Control Flow plugin
+ *
+ * This plugin will track changes to control flow and detect where
+ * instructions fault.
+ *
+ * Copyright (c) 2024 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <glib.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+typedef enum {
+    SORT_HOTDEST,  /* hottest branch */
+    SORT_EARLY,    /* most early exits */
+    SORT_POPDEST,  /* most destinations */
+} ReportType;
+
+ReportType report = SORT_HOTDEST;
+int topn = 10;
+
+typedef struct {
+    uint64_t daddr;
+    uint64_t dcount;
+} DestData;
+
+/* A node is an address where we can go to multiple places */
+typedef struct {
+    GMutex lock;
+    /* address of the branch point */
+    uint64_t addr;
+    /* array of DestData */
+    GArray *dests;
+    /* early exit/fault count */
+    uint64_t early_exit;
+    /* jump destination count */
+    uint64_t dest_count;
+    /* instruction data */
+    char *insn_disas;
+    /* symbol? */
+    const char *symbol;
+    /* times translated as last in block? */
+    int last_count;
+    /* times translated in the middle of block? */
+    int mid_count;
+} NodeData;
+
+/* We use this to track the current execution state */
+typedef struct {
+    /* address of end of block */
+    uint64_t end_block;
+    /* next pc after end of block */
+    uint64_t pc_after_block;
+    /* address of last executed PC */
+    uint64_t last_pc;
+} VCPUScoreBoard;
+
+/* descriptors for accessing the above scoreboard */
+static qemu_plugin_u64 end_block;
+static qemu_plugin_u64 pc_after_block;
+static qemu_plugin_u64 last_pc;
+
+
+static GMutex node_lock;
+static GHashTable *nodes;
+struct qemu_plugin_scoreboard *state;
+
+/* SORT_HOTDEST */
+static gint hottest(gconstpointer a, gconstpointer b)
+{
+    NodeData *na = (NodeData *) a;
+    NodeData *nb = (NodeData *) b;
+
+    return na->dest_count > nb->dest_count ? -1 :
+        na->dest_count == nb->dest_count ? 0 : 1;
+}
+
+static gint early(gconstpointer a, gconstpointer b)
+{
+    NodeData *na = (NodeData *) a;
+    NodeData *nb = (NodeData *) b;
+
+    return na->early_exit > nb->early_exit ? -1 :
+        na->early_exit == nb->early_exit ? 0 : 1;
+}
+
+static gint popular(gconstpointer a, gconstpointer b)
+{
+    NodeData *na = (NodeData *) a;
+    NodeData *nb = (NodeData *) b;
+
+    return na->dests->len > nb->dests->len ? -1 :
+        na->dests->len == nb->dests->len ? 0 : 1;
+}
+
+/* Filter out non-branches - returns true to remove entry */
+static gboolean filter_non_branches(gpointer key, gpointer value, gpointer user_data)
+{
+    NodeData *node = (NodeData *) value;
+
+    return node->dest_count == 0;
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+    g_autoptr(GString) result = g_string_new("collected ");
+    GList *data;
+    GCompareFunc sort = &hottest;
+    int n = 0;
+
+    g_mutex_lock(&node_lock);
+    g_string_append_printf(result, "%d control flow nodes in the hash table\n",
+                           g_hash_table_size(nodes));
+
+    /* remove all nodes that didn't branch */
+    g_hash_table_foreach_remove(nodes, filter_non_branches, NULL);
+
+    data = g_hash_table_get_values(nodes);
+
+    switch (report) {
+    case SORT_HOTDEST:
+        sort = &hottest;
+        break;
+    case SORT_EARLY:
+        sort = &early;
+        break;
+    case SORT_POPDEST:
+        sort = &popular;
+        break;
+    }
+
+    data = g_list_sort(data, sort);
+
+    for (GList *l = data;
+         l != NULL && n < topn;
+         l = l->next, n++) {
+        NodeData *n = l->data;
+        const char *type = n->mid_count ? "sync fault" : "branch";
+        g_string_append_printf(result, "  addr: 0x%"PRIx64 " %s: %s (%s)\n",
+                               n->addr, n->symbol, n->insn_disas, type);
+        if (n->early_exit) {
+            g_string_append_printf(result, "    early exits %"PRId64"\n",
+                                   n->early_exit);
+        }
+        g_string_append_printf(result, "    branches %"PRId64"\n",
+                               n->dest_count);
+        for (int j = 0; j < n->dests->len; j++) {
+            DestData *dd = &g_array_index(n->dests, DestData, j);
+            g_string_append_printf(result, "      to 0x%"PRIx64" (%"PRId64")\n",
+                                   dd->daddr, dd->dcount);
+        }
+    }
+
+    qemu_plugin_outs(result->str);
+
+    g_mutex_unlock(&node_lock);
+}
+
+static void plugin_init(void)
+{
+    g_mutex_init(&node_lock);
+    nodes = g_hash_table_new(NULL, g_direct_equal);
+    state = qemu_plugin_scoreboard_new(sizeof(VCPUScoreBoard));
+
+    /* score board declarations */
+    end_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, end_block);
+    pc_after_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, pc_after_block);
+    last_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, last_pc);
+}
+
+static NodeData *create_node(uint64_t addr)
+{
+    NodeData *node = g_new0(NodeData, 1);
+    g_mutex_init(&node->lock);
+    node->addr = addr;
+    node->dests = g_array_new(true, true, sizeof(DestData));
+    return node;
+}
+
+static NodeData *fetch_node(uint64_t addr, bool create_if_not_found)
+{
+    NodeData *node = NULL;
+
+    g_mutex_lock(&node_lock);
+    node = (NodeData *) g_hash_table_lookup(nodes, (gconstpointer) addr);
+    if (!node && create_if_not_found) {
+        node = create_node(addr);
+        g_hash_table_insert(nodes, (gpointer) addr, (gpointer) node);
+    }
+    g_mutex_unlock(&node_lock);
+    return node;
+}
+
+/*
+ * Called when we detect a non-linear execution (pc !=
+ * pc_after_block). This could be due to a fault causing some sort of
+ * exit exception (if last_pc != block_end) or just a taken branch.
+ */
+static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
+{
+    uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index);
+    uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index);
+    uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index);
+    uint64_t pc = GPOINTER_TO_UINT(udata);
+
+    /* return early for address 0 */
+    if (!lpc) {
+        return;
+    }
+
+    NodeData *node = fetch_node(lpc, true);
+    DestData *data = NULL;
+    bool early_exit = (lpc != ebpc);
+    GArray *dests;
+
+    /* the condition should never hit */
+    g_assert(pc != npc);
+
+    g_mutex_lock(&node->lock);
+
+    if (early_exit) {
+        fprintf(stderr, "%s: pc=%"PRIx64", ebpc=%"PRIx64
+                " npc=%"PRIx64", lpc=%"PRIx64"\n",
+                __func__, pc, ebpc, npc, lpc);
+        node->early_exit++;
+        if (!node->mid_count) {
+            /* count now as we've only just allocated */
+            node->mid_count++;
+        }
+    }
+
+    dests = node->dests;
+    for (int i = 0; i < dests->len; i++) {
+        if (g_array_index(dests, DestData, i).daddr == pc) {
+            data = &g_array_index(dests, DestData, i);
+        }
+    }
+
+    /* we've never seen this before, allocate a new entry */
+    if (!data) {
+        DestData new_entry = { .daddr = pc };
+        g_array_append_val(dests, new_entry);
+        data = &g_array_index(dests, DestData, dests->len - 1);
+        g_assert(data->daddr == pc);
+    }
+
+    data->dcount++;
+    node->dest_count++;
+
+    g_mutex_unlock(&node->lock);
+}
+
+/*
+ * At the start of each block we need to resolve two things:
+ *
+ *  - is last_pc == block_end, if not we had an early exit
+ *  - is start of block last_pc + insn width, if not we jumped
+ *
+ * Once those are dealt with we can instrument the rest of the
+ * instructions for their execution.
+ *
+ */
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+    uint64_t pc = qemu_plugin_tb_vaddr(tb);
+    size_t insns = qemu_plugin_tb_n_insns(tb);
+    struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0);
+    struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1);
+
+    /*
+     * check if we are executing linearly after the last block. We can
+     * handle both early block exits and normal branches in the
+     * callback if we hit it.
+     */
+    gpointer udata = GUINT_TO_POINTER(pc);
+    qemu_plugin_register_vcpu_tb_exec_cond_cb(
+        tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,
+        QEMU_PLUGIN_COND_NE, pc_after_block, pc, udata);
+
+    /*
+     * Now we can set start/end for this block so the next block can
+     * check where we are at. Do this on the first instruction and not
+     * the TB so we don't get mixed up with above.
+     */
+    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
+                                                      QEMU_PLUGIN_INLINE_STORE_U64,
+                                                      end_block, qemu_plugin_insn_vaddr(last_insn));
+    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
+                                                      QEMU_PLUGIN_INLINE_STORE_U64,
+                                                      pc_after_block,
+                                                      qemu_plugin_insn_vaddr(last_insn) +
+                                                      qemu_plugin_insn_size(last_insn));
+
+    for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
+        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
+        uint64_t ipc = qemu_plugin_insn_vaddr(insn);
+        /*
+         * If this is a potential branch point check if we could grab
+         * the disassembly for it. If it is the last instruction
+         * always create an entry.
+         */
+        NodeData *node = fetch_node(ipc, insn == last_insn);
+        if (node) {
+            g_mutex_lock(&node->lock);
+            if (!node->insn_disas) {
+                node->insn_disas = qemu_plugin_insn_disas(insn);
+            }
+            if (!node->symbol) {
+                node->symbol = qemu_plugin_insn_symbol(insn);
+            }
+            if (last_insn == insn) {
+                node->last_count++;
+            } else {
+                node->mid_count++;
+            }
+            g_mutex_unlock(&node->lock);
+        }
+
+        /* Store the PC of what we are about to execute */
+        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn,
+                                                            QEMU_PLUGIN_INLINE_STORE_U64,
+                                                            last_pc, ipc);
+    }
+}
+
+QEMU_PLUGIN_EXPORT
+int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
+                        int argc, char **argv)
+{
+    for (int i = 0; i < argc; i++) {
+        char *opt = argv[i];
+        g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
+        if (g_strcmp0(tokens[0], "sort") == 0) {
+            if (g_strcmp0(tokens[1], "hottest") == 0) {
+                report = SORT_HOTDEST;
+            } else if (g_strcmp0(tokens[1], "early") == 0) {
+                report = SORT_EARLY;
+            } else if (g_strcmp0(tokens[1], "popular") == 0) {
+                report = SORT_POPDEST;
+            } else {
+                fprintf(stderr, "failed to parse: %s\n", tokens[1]);
+                return -1;
+            }
+        } else {
+            fprintf(stderr, "option parsing failed: %s\n", opt);
+            return -1;
+        }
+    }
+
+    plugin_init();
+
+    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+    return 0;
+}
diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
index 98a89d5c40..ea81fde2b5 100644
--- a/contrib/plugins/Makefile
+++ b/contrib/plugins/Makefile
@@ -29,6 +29,7 @@ NAMES += cache
 NAMES += drcov
 NAMES += ips
 NAMES += stoptrigger
+NAMES += cflow
 
 ifeq ($(CONFIG_WIN32),y)
 SO_SUFFIX := .dll
-- 
2.39.2


Re: [RFC PATCH v3] contrib/plugins: control flow plugin
Posted by Pierrick Bouvier 1 month, 3 weeks ago
On 7/18/24 07:59, Alex Bennée wrote:
> This is a simple control flow tracking plugin that uses the latest
> inline and conditional operations to detect and track control flow
> changes. It is currently an exercise at seeing how useful the changes
> are.
> 
> Based-on: <20240312075428.244210-1-pierrick.bouvier@linaro.org>
> Cc: Gustavo Romero <gustavo.romero@linaro.org>
> Cc: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> Message-Id: <20240311153432.1395190-1-alex.bennee@linaro.org>
> 
> ---
> v2
>    - only need a single call back
>    - drop need for INSN_WIDTH
>    - still don't understand the early exits
> 
> v3
>    - move initial STORE ops to first instruction to avoid confusion
>    with the conditional callback on the start
>    - filter out non-branches before processing
>    - fix off-by-one with accounting
>    - display "sync fault" or "branch" instead of raw numbers
> ---
>   contrib/plugins/cflow.c  | 364 +++++++++++++++++++++++++++++++++++++++
>   contrib/plugins/Makefile |   1 +
>   2 files changed, 365 insertions(+)
>   create mode 100644 contrib/plugins/cflow.c
> 
> diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c
> new file mode 100644
> index 0000000000..e5b0d4bbbd
> --- /dev/null
> +++ b/contrib/plugins/cflow.c
> @@ -0,0 +1,364 @@
> +/*
> + * Control Flow plugin
> + *
> + * This plugin will track changes to control flow and detect where
> + * instructions fault.
> + *
> + * Copyright (c) 2024 Linaro Ltd
> + *
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +#include <glib.h>
> +#include <inttypes.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <unistd.h>
> +
> +#include <qemu-plugin.h>
> +
> +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
> +
> +typedef enum {
> +    SORT_HOTDEST,  /* hottest branch */

s/HOTDEST/HOTTEST I guess. If not, s/hottest/hotdest in the rest of the 
code.

From what I understand, it's not the hottest branch, but the hottest
instruction (most frequently used as destination).

And popular is the insn with the greatest number of exits (most
frequently used as source).

How about SORT_HOTTEST_SOURCE_INSN and SORT_HOTTEST_DEST_INSN instead?

> +    SORT_EARLY,    /* most early exits */
> +    SORT_POPDEST,  /* most destinations */
> +} ReportType;
> +
> +ReportType report = SORT_HOTDEST;
> +int topn = 10;
> +
> +typedef struct {
> +    uint64_t daddr;
> +    uint64_t dcount;
> +} DestData;
> +
> +/* A node is an address where we can go to multiple places */
> +typedef struct {
> +    GMutex lock;
> +    /* address of the branch point */
> +    uint64_t addr;
> +    /* array of DestData */
> +    GArray *dests;
> +    /* early exit/fault count */
> +    uint64_t early_exit;
> +    /* jump destination count */
> +    uint64_t dest_count;
> +    /* instruction data */
> +    char *insn_disas;
> +    /* symbol? */
> +    const char *symbol;
> +    /* times translated as last in block? */
> +    int last_count;
> +    /* times translated in the middle of block? */
> +    int mid_count;
> +} NodeData;
> +
> +/* We use this to track the current execution state */
> +typedef struct {
> +    /* address of end of block */
> +    uint64_t end_block;
> +    /* next pc after end of block */
> +    uint64_t pc_after_block;
> +    /* address of last executed PC */
> +    uint64_t last_pc;
> +} VCPUScoreBoard;
> +
> +/* descriptors for accessing the above scoreboard */
> +static qemu_plugin_u64 end_block;
> +static qemu_plugin_u64 pc_after_block;
> +static qemu_plugin_u64 last_pc;
> +
> +
> +static GMutex node_lock;
> +static GHashTable *nodes;
> +struct qemu_plugin_scoreboard *state;
> +
> +/* SORT_HOTDEST */
> +static gint hottest(gconstpointer a, gconstpointer b)
> +{
> +    NodeData *na = (NodeData *) a;
> +    NodeData *nb = (NodeData *) b;
> +
> +    return na->dest_count > nb->dest_count ? -1 :
> +        na->dest_count == nb->dest_count ? 0 : 1;
> +}
> +
> +static gint early(gconstpointer a, gconstpointer b)
> +{
> +    NodeData *na = (NodeData *) a;
> +    NodeData *nb = (NodeData *) b;
> +
> +    return na->early_exit > nb->early_exit ? -1 :
> +        na->early_exit == nb->early_exit ? 0 : 1;
> +}
> +
> +static gint popular(gconstpointer a, gconstpointer b)
> +{
> +    NodeData *na = (NodeData *) a;
> +    NodeData *nb = (NodeData *) b;
> +
> +    return na->dests->len > nb->dests->len ? -1 :
> +        na->dests->len == nb->dests->len ? 0 : 1;
> +}
> +
> +/* Filter out non-branches - returns true to remove entry */
> +static gboolean filter_non_branches(gpointer key, gpointer value, gpointer user_data)
> +{
> +    NodeData *node = (NodeData *) value;
> +
> +    return node->dest_count == 0;
> +}
> +
> +static void plugin_exit(qemu_plugin_id_t id, void *p)
> +{
> +    g_autoptr(GString) result = g_string_new("collected ");
> +    GList *data;
> +    GCompareFunc sort = &hottest;
> +    int n = 0;
> +
> +    g_mutex_lock(&node_lock);
> +    g_string_append_printf(result, "%d control flow nodes in the hash table\n",
> +                           g_hash_table_size(nodes));
> +
> +    /* remove all nodes that didn't branch */
> +    g_hash_table_foreach_remove(nodes, filter_non_branches, NULL);
> +
> +    data = g_hash_table_get_values(nodes);
> +
> +    switch (report) {
> +    case SORT_HOTDEST:
> +        sort = &hottest;
> +        break;
> +    case SORT_EARLY:
> +        sort = &early;
> +        break;
> +    case SORT_POPDEST:
> +        sort = &popular;
> +        break;
> +    }
> +
> +    data = g_list_sort(data, sort);
> +
> +    for (GList *l = data;
> +         l != NULL && n < topn;
> +         l = l->next, n++) {
> +        NodeData *n = l->data;
> +        const char *type = n->mid_count ? "sync fault" : "branch";
> +        g_string_append_printf(result, "  addr: 0x%"PRIx64 " %s: %s (%s)\n",
> +                               n->addr, n->symbol, n->insn_disas, type);
> +        if (n->early_exit) {
> +            g_string_append_printf(result, "    early exits %"PRId64"\n",
> +                                   n->early_exit);
> +        }
> +        g_string_append_printf(result, "    branches %"PRId64"\n",
> +                               n->dest_count);
> +        for (int j = 0; j < n->dests->len; j++ ) {
> +            DestData *dd = &g_array_index(n->dests, DestData, j);
> +            g_string_append_printf(result, "      to 0x%"PRIx64" (%"PRId64")\n",
> +                                   dd->daddr, dd->dcount);
> +        }
> +    }
> +
> +    qemu_plugin_outs(result->str);
> +
> +    g_mutex_unlock(&node_lock);
> +}
> +
> +static void plugin_init(void)
> +{
> +    g_mutex_init(&node_lock);
> +    nodes = g_hash_table_new(NULL, g_direct_equal);
> +    state = qemu_plugin_scoreboard_new(sizeof(VCPUScoreBoard));
> +
> +    /* score board declarations */
> +    end_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, end_block);
> +    pc_after_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, pc_after_block);
> +    last_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, last_pc);
> +}
> +
> +static NodeData *create_node(uint64_t addr)
> +{
> +    NodeData *node = g_new0(NodeData, 1);
> +    g_mutex_init(&node->lock);
> +    node->addr = addr;
> +    node->dests = g_array_new(true, true, sizeof(DestData));
> +    return node;
> +}
> +
> +static NodeData *fetch_node(uint64_t addr, bool create_if_not_found)
> +{
> +    NodeData *node = NULL;
> +
> +    g_mutex_lock(&node_lock);
> +    node = (NodeData *) g_hash_table_lookup(nodes, (gconstpointer) addr);
> +    if (!node && create_if_not_found) {
> +        node = create_node(addr);
> +        g_hash_table_insert(nodes, (gpointer) addr, (gpointer) node);
> +    }
> +    g_mutex_unlock(&node_lock);
> +    return node;
> +}
> +
> +/*
> + * Called when we detect a non-linear execution (pc !=
> + * pc_after_block). This could be due to a fault causing some sort of
> + * exit exception (if last_pc != block_end) or just a taken branch.
> + */
> +static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
> +{
> +    uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index);
> +    uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index);
> +    uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index);
> +    uint64_t pc = GPOINTER_TO_UINT(udata);
> +
> +    /* return early for address 0 */
> +    if (!lpc) {
> +        return;
> +    }
> +
> +    NodeData *node = fetch_node(lpc, true);

I would suggest a different approach here.

This plugin keeps data as a graph between instructions.
Another possibility would be to use a *per vcpu* hashtable, which simply
associates the key (source_addr, dest_addr) with a number of hits:
(uint64, uint64) -> uint64. This is all we really need at exec time; the
rest can be reconstructed from data gathered at translation time.

This way, you can do all the work in vcpu_tb_branched_exec without
needing a single lock (here, we lock twice: once globally to fetch the
node, and once for the node itself).

Then, at exit, you can merge the hashtables from all vcpus and do the
work to rebuild the full graph from all the transitions collected.

As a bonus, you can get the true list of hottest branches, whereas now
all you have is the hottest insn.

The Node structure would simply become Insn, as you want to keep the
pc, symbol and disassembly of every instruction.
And you need to keep track of all tbs too, each with its length and a
pointer to its list of instructions.

It's a different paradigm from what is done here, but I think it would
scale much better, especially with multithreaded programs.
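
Roughly, as an untested sketch (EdgeKey, edge_tables and the table's
custom hash/equal pair are invented names here; a real version would
allocate the per-vCPU tables from a vcpu_init callback, sized via
qemu_plugin_n_vcpus()):

typedef struct {
    uint64_t src;   /* last_pc of the source block */
    uint64_t dst;   /* pc we actually landed on */
} EdgeKey;

static GHashTable **edge_tables; /* one table per vCPU */

static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
{
    GHashTable *t = edge_tables[cpu_index];
    EdgeKey key = {
        .src = qemu_plugin_u64_get(last_pc, cpu_index),
        .dst = GPOINTER_TO_UINT(udata),
    };
    uint64_t *hits = g_hash_table_lookup(t, &key);

    if (!hits) {
        EdgeKey *k = g_new(EdgeKey, 1);
        *k = key;
        hits = g_new0(uint64_t, 1);
        g_hash_table_insert(t, k, hits);
    }
    (*hits)++; /* only this vCPU touches this table, so no lock */
}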

> +    DestData *data = NULL;
> +    bool early_exit = (lpc != ebpc);
> +    GArray *dests;
> +
> +    /* the condition should never hit */
> +    g_assert(pc != npc);
> +
> +    g_mutex_lock(&node->lock);
> +
> +    if (early_exit) {
> +        fprintf(stderr, "%s: pc=%"PRIx64", epbc=%"PRIx64
> +                " npc=%"PRIx64", lpc=%"PRIx64", \n",
> +                __func__, pc, ebpc, npc, lpc);
> +        node->early_exit++;
> +        if (!node->mid_count) {
> +            /* count now as we've only just allocated */
> +            node->mid_count++;
> +        }
> +    }
> +
> +    dests = node->dests;
> +    for (int i = 0; i < dests->len; i++) {
> +        if (g_array_index(dests, DestData, i).daddr == pc) {
> +            data = &g_array_index(dests, DestData, i);
> +        }
> +    }
> +
> +    /* we've never seen this before, allocate a new entry */
> +    if (!data) {
> +        DestData new_entry = { .daddr = pc };
> +        g_array_append_val(dests, new_entry);
> +        data = &g_array_index(dests, DestData, dests->len - 1);
> +        g_assert(data->daddr == pc);
> +    }
> +
> +    data->dcount++;
> +    node->dest_count++;
> +
> +    g_mutex_unlock(&node->lock);
> +}
> +
> +/*
> + * At the start of each block we need to resolve two things:
> + *
> + *  - is last_pc == block_end, if not we had an early exit
> + *  - is start of block last_pc + insn width, if not we jumped
> + *
> + * Once those are dealt with we can instrument the rest of the
> + * instructions for their execution.
> + *
> + */
> +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
> +{
> +    uint64_t pc = qemu_plugin_tb_vaddr(tb);
> +    size_t insns = qemu_plugin_tb_n_insns(tb);
> +    struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0);
> +    struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1);
> +
> +    /*
> +     * check if we are executing linearly after the last block. We can
> +     * handle both early block exits and normal branches in the
> +     * callback if we hit it.
> +     */
> +    gpointer udata = GUINT_TO_POINTER(pc);
> +    qemu_plugin_register_vcpu_tb_exec_cond_cb(
> +        tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,
> +        QEMU_PLUGIN_COND_NE, pc_after_block, pc, udata);
> +
> +    /*
> +     * Now we can set start/end for this block so the next block can
> +     * check where we are at. Do this on the first instruction and not
> +     * the TB so we don't get mixed up with above.
> +     */
> +    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
> +                                                      QEMU_PLUGIN_INLINE_STORE_U64,
> +                                                      end_block, qemu_plugin_insn_vaddr(last_insn));
> +    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
> +                                                      QEMU_PLUGIN_INLINE_STORE_U64,
> +                                                      pc_after_block,
> +                                                      qemu_plugin_insn_vaddr(last_insn) +
> +                                                      qemu_plugin_insn_size(last_insn));
> +
> +    for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
> +        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
> +        uint64_t ipc = qemu_plugin_insn_vaddr(insn);
> +        /*
> +         * If this is a potential branch point check if we could grab
> +         * the disassembly for it. If it is the last instruction
> +         * always create an entry.
> +         */
> +        NodeData *node = fetch_node(ipc, last_insn);
> +        if (node) {
> +            g_mutex_lock(&node->lock);
> +            if (!node->insn_disas) {
> +                node->insn_disas = qemu_plugin_insn_disas(insn);
> +            }
> +            if (!node->symbol) {
> +                node->symbol = qemu_plugin_insn_symbol(insn);
> +            }
> +            if (last_insn == insn) {
> +                node->last_count++;
> +            } else {
> +                node->mid_count++;
> +            }
> +            g_mutex_unlock(&node->lock);
> +        }
> +
> +        /* Store the PC of what we are about to execute */
> +        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn,
> +                                                            QEMU_PLUGIN_INLINE_STORE_U64,
> +                                                            last_pc, ipc);
> +    }
> +}
> +
> +QEMU_PLUGIN_EXPORT
> +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
> +                        int argc, char **argv)
> +{
> +    for (int i = 0; i < argc; i++) {
> +        char *opt = argv[i];
> +        g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
> +        if (g_strcmp0(tokens[0], "sort") == 0) {
> +            if (g_strcmp0(tokens[1], "hottest") == 0) {
> +                report = SORT_HOTDEST;
> +            } else if (g_strcmp0(tokens[1], "early") == 0) {
> +                report = SORT_EARLY;
> +            } else if (g_strcmp0(tokens[1], "popular") == 0) {
> +                report = SORT_POPDEST;
> +            } else {
> +                fprintf(stderr, "failed to parse: %s\n", tokens[1]);
> +                return -1;
> +            }
> +        } else {
> +            fprintf(stderr, "option parsing failed: %s\n", opt);
> +            return -1;
> +        }
> +    }
> +
> +    plugin_init();
> +
> +    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
> +    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
> +    return 0;
> +}
> diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
> index 98a89d5c40..ea81fde2b5 100644
> --- a/contrib/plugins/Makefile
> +++ b/contrib/plugins/Makefile
> @@ -29,6 +29,7 @@ NAMES += cache
>   NAMES += drcov
>   NAMES += ips
>   NAMES += stoptrigger
> +NAMES += cflow
>   
>   ifeq ($(CONFIG_WIN32),y)
>   SO_SUFFIX := .dll
Re: [RFC PATCH v3] contrib/plugins: control flow plugin
Posted by Alex Bennée 1 month, 2 weeks ago
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> On 7/18/24 07:59, Alex Bennée wrote:
>> This is a simple control flow tracking plugin that uses the latest
>> inline and conditional operations to detect and track control flow
>> changes. It is currently an exercise at seeing how useful the changes
>> are.
>> Based-on: <20240312075428.244210-1-pierrick.bouvier@linaro.org>
>> Cc: Gustavo Romero <gustavo.romero@linaro.org>
>> Cc: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
>> Message-Id: <20240311153432.1395190-1-alex.bennee@linaro.org>
<snip>
>> +/*
>> + * Called when we detect a non-linear execution (pc !=
>> + * pc_after_block). This could be due to a fault causing some sort of
>> + * exit exception (if last_pc != block_end) or just a taken branch.
>> + */
>> +static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
>> +{
>> +    uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index);
>> +    uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index);
>> +    uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index);
>> +    uint64_t pc = GPOINTER_TO_UINT(udata);
>> +
>> +    /* return early for address 0 */
>> +    if (!lpc) {
>> +        return;
>> +    }
>> +
>> +    NodeData *node = fetch_node(lpc, true);
>
> I would suggest a different approach here.
>
> This plugin keeps data as a graph between instructions.
> Another possibility would be to use a *per vcpu* hashtable, which
> simply associates the key (source_addr, dest_addr), to a number of
> hits.
> (uint64, uint64) -> uint64. This is all we really need at exec time,
> the rest can be reconstructed for data gathered at translation time.

Hmm, I'm not sure how to deal with 128-bit keys with glib's hash table
implementation. I think the gpointer can be an opaque pointer though,
with a GEqualFunc to compare - but adding multiple records to a hash
table seems wrong.
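
For 32-bit guests one dodge might be to pack the pair into a single
pointer-sized key and keep g_direct_hash/g_direct_equal - an untested
sketch, assuming 64-bit host pointers (edge_key is an invented name):

/*
 * sketch: losslessly pack (src, dst) for a 32-bit guest into one
 * pointer-sized key; 64-bit guests would still need a struct key
 * with custom hash/equal functions
 */
static inline gpointer edge_key(uint64_t src, uint64_t dst)
{
    return GSIZE_TO_POINTER((gsize)((src << 32) | (dst & 0xffffffffULL)));
}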

> This way, you can do all the work in vcpu_tb_branched_exec without
> needing a single lock. (here, we lock twice, once globally to fetch
> all the nodes, and once for the node itself).
>
> Then, at exit, you can merge hashtables from all vcpu, and do the work
> to rebuild the full graph from all transitions collected.

Well, a lot of transitions are just continuations (although maybe not;
I guess I need to check that hunch).

> As a bonus, you can get the true list of hottest branches, when now,
> it's the hottest insn only you have.

I'm not sure I follow. Are you saying there are control flow changes I
don't detect? The fall-through cases?

> The Node structure would simply becomes Insn, as you want to keep the
> pc, symbols and disassembly of every instruction.
> And you need to keep track of all tb too, with length and pointing to
> the list of instructions.

What would I do with the TB information that I couldn't encode in Insn
at translation time?

>
> It's a different paradigm from what is doing here, but I think it
> would scale much better, especially with multithreaded programs.
>
>> +    DestData *data = NULL;
>> +    bool early_exit = (lpc != ebpc);
>> +    GArray *dests;
>> +
>> +    /* the condition should never hit */
>> +    g_assert(pc != npc);
>> +
>> +    g_mutex_lock(&node->lock);
>> +
>> +    if (early_exit) {
>> +        fprintf(stderr, "%s: pc=%"PRIx64", epbc=%"PRIx64
>> +                " npc=%"PRIx64", lpc=%"PRIx64", \n",
>> +                __func__, pc, ebpc, npc, lpc);
>> +        node->early_exit++;
>> +        if (!node->mid_count) {
>> +            /* count now as we've only just allocated */
>> +            node->mid_count++;
>> +        }
>> +    }
>> +
>> +    dests = node->dests;
>> +    for (int i = 0; i < dests->len; i++) {
>> +        if (g_array_index(dests, DestData, i).daddr == pc) {
>> +            data = &g_array_index(dests, DestData, i);
>> +        }
>> +    }
>> +
>> +    /* we've never seen this before, allocate a new entry */
>> +    if (!data) {
>> +        DestData new_entry = { .daddr = pc };
>> +        g_array_append_val(dests, new_entry);
>> +        data = &g_array_index(dests, DestData, dests->len - 1);
>> +        g_assert(data->daddr == pc);
>> +    }
>> +
>> +    data->dcount++;
>> +    node->dest_count++;
>> +
>> +    g_mutex_unlock(&node->lock);
>> +}
>> +
>> +/*
>> + * At the start of each block we need to resolve two things:
>> + *
>> + *  - is last_pc == block_end, if not we had an early exit
>> + *  - is start of block last_pc + insn width, if not we jumped
>> + *
>> + * Once those are dealt with we can instrument the rest of the
>> + * instructions for their execution.
>> + *
>> + */
>> +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
>> +{
>> +    uint64_t pc = qemu_plugin_tb_vaddr(tb);
>> +    size_t insns = qemu_plugin_tb_n_insns(tb);
>> +    struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0);
>> +    struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1);
>> +
>> +    /*
>> +     * check if we are executing linearly after the last block. We can
>> +     * handle both early block exits and normal branches in the
>> +     * callback if we hit it.
>> +     */
>> +    gpointer udata = GUINT_TO_POINTER(pc);
>> +    qemu_plugin_register_vcpu_tb_exec_cond_cb(
>> +        tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,
>> +        QEMU_PLUGIN_COND_NE, pc_after_block, pc, udata);
>> +
>> +    /*
>> +     * Now we can set start/end for this block so the next block can
>> +     * check where we are at. Do this on the first instruction and not
>> +     * the TB so we don't get mixed up with above.
>> +     */
>> +    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
>> +                                                      QEMU_PLUGIN_INLINE_STORE_U64,
>> +                                                      end_block, qemu_plugin_insn_vaddr(last_insn));
>> +    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
>> +                                                      QEMU_PLUGIN_INLINE_STORE_U64,
>> +                                                      pc_after_block,
>> +                                                      qemu_plugin_insn_vaddr(last_insn) +
>> +                                                      qemu_plugin_insn_size(last_insn));
>> +
>> +    for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
>> +        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
>> +        uint64_t ipc = qemu_plugin_insn_vaddr(insn);
>> +        /*
>> +         * If this is a potential branch point check if we could grab
>> +         * the disassembly for it. If it is the last instruction
>> +         * always create an entry.
>> +         */
>> +        NodeData *node = fetch_node(ipc, last_insn);
>> +        if (node) {
>> +            g_mutex_lock(&node->lock);
>> +            if (!node->insn_disas) {
>> +                node->insn_disas = qemu_plugin_insn_disas(insn);
>> +            }
>> +            if (!node->symbol) {
>> +                node->symbol = qemu_plugin_insn_symbol(insn);
>> +            }
>> +            if (last_insn == insn) {
>> +                node->last_count++;
>> +            } else {
>> +                node->mid_count++;
>> +            }
>> +            g_mutex_unlock(&node->lock);
>> +        }
>> +
>> +        /* Store the PC of what we are about to execute */
>> +        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn,
>> +                                                            QEMU_PLUGIN_INLINE_STORE_U64,
>> +                                                            last_pc, ipc);
>> +    }
>> +}
>> +
>> +QEMU_PLUGIN_EXPORT
>> +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
>> +                        int argc, char **argv)
>> +{
>> +    for (int i = 0; i < argc; i++) {
>> +        char *opt = argv[i];
>> +        g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
>> +        if (g_strcmp0(tokens[0], "sort") == 0) {
>> +            if (g_strcmp0(tokens[1], "hottest") == 0) {
>> +                report = SORT_HOTDEST;
>> +            } else if (g_strcmp0(tokens[1], "early") == 0) {
>> +                report = SORT_EARLY;
>> +            } else if (g_strcmp0(tokens[1], "popular") == 0) {
>> +                report = SORT_POPDEST;
>> +            } else {
>> +                fprintf(stderr, "failed to parse: %s\n", tokens[1]);
>> +                return -1;
>> +            }
>> +        } else {
>> +            fprintf(stderr, "option parsing failed: %s\n", opt);
>> +            return -1;
>> +        }
>> +    }
>> +
>> +    plugin_init();
>> +
>> +    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
>> +    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
>> +    return 0;
>> +}
>> diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
>> index 98a89d5c40..ea81fde2b5 100644
>> --- a/contrib/plugins/Makefile
>> +++ b/contrib/plugins/Makefile
>> @@ -29,6 +29,7 @@ NAMES += cache
>>   NAMES += drcov
>>   NAMES += ips
>>   NAMES += stoptrigger
>> +NAMES += cflow
>>     ifeq ($(CONFIG_WIN32),y)
>>   SO_SUFFIX := .dll

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
Re: [RFC PATCH v3] contrib/plugins: control flow plugin
Posted by Pierrick Bouvier 1 month, 2 weeks ago
On 7/19/24 03:07, Alex Bennée wrote:
> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
> 
>> On 7/18/24 07:59, Alex Bennée wrote:
>>> This is a simple control flow tracking plugin that uses the latest
>>> inline and conditional operations to detect and track control flow
>>> changes. It is currently an exercise at seeing how useful the changes
>>> are.
>>> Based-on: <20240312075428.244210-1-pierrick.bouvier@linaro.org>
>>> Cc: Gustavo Romero <gustavo.romero@linaro.org>
>>> Cc: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>>> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
>>> Message-Id: <20240311153432.1395190-1-alex.bennee@linaro.org>
> <snip>
>>> +/*
>>> + * Called when we detect a non-linear execution (pc !=
>>> + * pc_after_block). This could be due to a fault causing some sort of
>>> + * exit exception (if last_pc != block_end) or just a taken branch.
>>> + */
>>> +static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata)
>>> +{
>>> +    uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index);
>>> +    uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index);
>>> +    uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index);
>>> +    uint64_t pc = GPOINTER_TO_UINT(udata);
>>> +
>>> +    /* return early for address 0 */
>>> +    if (!lpc) {
>>> +        return;
>>> +    }
>>> +
>>> +    NodeData *node = fetch_node(lpc, true);
>>
>> I would suggest a different approach here.
>>
>> This plugin keeps data as a graph between instructions.
>> Another possibility would be to use a *per vcpu* hashtable, which
>> simply associates the key (source_addr, dest_addr), to a number of
>> hits.
>> (uint64, uint64) -> uint64. This is all we really need at exec time,
>> the rest can be reconstructed for data gathered at translation time.
> 
> Hmm I'm not sure how to deal with 128 bit keys with glib's hash table
> implementation. I think the gpointer can be an opaque pointer though
> with GEqualFunc to compare - but adding multiple records to a hash table
> seems wrong.
> 
For key types other than ints/strings, we simply need to write a custom
hash/equal function pair for the struct.
As long as your data has the right properties, you can combine the
hashes of every field using a simple xor.
There are smarter implementations (using magic constants), but for our
use case it's enough.

A hash function that always returns 0 (or creates a lot of collisions)
will not create any duplicates, but performance will be bad as a new
key will always be compared to all the others. That's why writing good
hash functions is considered hard, and why hashtables are a tricky
structure.

It's pretty easy to find out if there is a problem though, by profiling
a run and seeing how long you spend calling edge_equal.

struct edge {
   uint64_t src_addr;
   uint64_t dest_addr;
};

guint edge_hash(gconstpointer v)
{
   struct edge *e = (struct edge *)v;
   /* we combine hashes using XOR, no problem with commutativity
   (edges are one-way) or identity (a -> a does not exist) */
   return g_int64_hash(&e->src_addr) ^ g_int64_hash(&e->dest_addr);
}

gboolean edge_equal(gconstpointer v1, gconstpointer v2)
{
   struct edge *e1 = (struct edge *)v1;
   struct edge *e2 = (struct edge *)v2;
   return e1->src_addr == e2->src_addr && e1->dest_addr == e2->dest_addr;
}

...

GHashTable *edges = g_hash_table_new(edge_hash, edge_equal);
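
The update path would then be something like this (sketch: it assumes
the table owns its keys and values, i.e. it was created with
g_hash_table_new_full(edge_hash, edge_equal, g_free, g_free)):

/* sketch: bump the hit counter for the edge src -> dst */
static void count_edge(GHashTable *edges, uint64_t src, uint64_t dst)
{
    struct edge key = { .src_addr = src, .dest_addr = dst };
    uint64_t *hits = g_hash_table_lookup(edges, &key);

    if (!hits) {
        struct edge *k = g_new(struct edge, 1);
        *k = key;
        hits = g_new0(uint64_t, 1);
        g_hash_table_insert(edges, k, hits);
    }
    (*hits)++;
}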

>> This way, you can do all the work in vcpu_tb_branched_exec without
>> needing a single lock. (here, we lock twice, once globally to fetch
>> all the nodes, and once for the node itself).
>>
>> Then, at exit, you can merge hashtables from all vcpu, and do the work
>> to rebuild the full graph from all transitions collected.
> 
> Well a lot of transitions are just continuations (although maybe not I
> guess I need to check that hunch).
>

It can still be seen as a transition between two basic blocks, and part 
of a normal control flow graph. I'm not sure if we want to eliminate those.

>> As a bonus, you can get the true list of hottest branches, when now,
>> it's the hottest insn only you have.
> 
> I'm not sure I follow. Are you saying there are control flow changes I
> don't detect? The fall-through cases?
> 

Since the plugin is instruction-address based, we never have the list
of most used edges. We simply sort by instruction (most used as source
or as dest) and list the edges from this instruction.

It's different from having the 10 edges that have been the most used.

Note: you can rebuild this information at plugin exit from what you
have (using struct edge defined above); I just wanted to point out that
SORT_HOTDEST is not the "hottest branches" like the comment says.

Both approaches are valid (insn vs edge), but keeping track of edges 
only at runtime is the fastest way (and lockless thanks to scoreboards).
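
The merge at exit is then mechanical - an untested sketch, assuming one
edges table per vCPU as above:

/* sketch: fold each per-vCPU table into one merged table at exit */
static void merge_one(gpointer key, gpointer value, gpointer user_data)
{
    GHashTable *merged = user_data;
    struct edge *e = key;
    uint64_t *hits = g_hash_table_lookup(merged, e);

    if (!hits) {
        struct edge *k = g_new(struct edge, 1);
        *k = *e;
        hits = g_new0(uint64_t, 1);
        g_hash_table_insert(merged, k, hits);
    }
    *hits += *(uint64_t *)value;
}

static GHashTable *merge_vcpu_tables(GHashTable **per_vcpu, int n_vcpus)
{
    GHashTable *merged = g_hash_table_new(edge_hash, edge_equal);

    for (int i = 0; i < n_vcpus; i++) {
        g_hash_table_foreach(per_vcpu[i], merge_one, merged);
    }
    return merged;
}

Sorting the merged table's entries by hit count then gives the true
top-N edges.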

>> The Node structure would simply becomes Insn, as you want to keep the
>> pc, symbols and disassembly of every instruction.
>> And you need to keep track of all tb too, with length and pointing to
>> the list of instructions.
> 
> What would I do with the TB information that I couldn't encode in Insn
> at translation time?
> 

We define a tb struct, where we dump info obtained at translation time.

struct BasicBlock {
    uint64_t addr;
    uint64_t size;
    GArray *insns; /* array of struct Insn * */
    ...
};

/* pc -> bb */
bbs = g_hash_table_new(g_int64_hash, g_int64_equal);
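
Filling that in from the translation hook could look like this
(untested sketch; bbs_lock is an invented global mutex and the struct
Insn bookkeeping is elided):

/* sketch: record each translated block so the graph can be rebuilt */
static void record_tb(struct qemu_plugin_tb *tb)
{
    uint64_t pc = qemu_plugin_tb_vaddr(tb);

    g_mutex_lock(&bbs_lock); /* translation can run from several threads */
    if (!g_hash_table_lookup(bbs, &pc)) {
        struct BasicBlock *bb = g_new0(struct BasicBlock, 1);
        bb->addr = pc;
        bb->insns = g_array_new(false, true, sizeof(struct Insn *));
        for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            bb->size += qemu_plugin_insn_size(insn);
            /* ... look up or create the struct Insn for this pc ... */
        }
        g_hash_table_insert(bbs, &bb->addr, bb);
    }
    g_mutex_unlock(&bbs_lock);
}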

>>
>> It's a different paradigm from what is doing here, but I think it
>> would scale much better, especially with multithreaded programs.
>>
>>> +    DestData *data = NULL;
>>> +    bool early_exit = (lpc != ebpc);
>>> +    GArray *dests;
>>> +
>>> +    /* the condition should never hit */
>>> +    g_assert(pc != npc);
>>> +
>>> +    g_mutex_lock(&node->lock);
>>> +
>>> +    if (early_exit) {
>>> +        fprintf(stderr, "%s: pc=%"PRIx64", epbc=%"PRIx64
>>> +                " npc=%"PRIx64", lpc=%"PRIx64", \n",
>>> +                __func__, pc, ebpc, npc, lpc);
>>> +        node->early_exit++;
>>> +        if (!node->mid_count) {
>>> +            /* count now as we've only just allocated */
>>> +            node->mid_count++;
>>> +        }
>>> +    }
>>> +
>>> +    dests = node->dests;
>>> +    for (int i = 0; i < dests->len; i++) {
>>> +        if (g_array_index(dests, DestData, i).daddr == pc) {
>>> +            data = &g_array_index(dests, DestData, i);
>>> +        }
>>> +    }
>>> +
>>> +    /* we've never seen this before, allocate a new entry */
>>> +    if (!data) {
>>> +        DestData new_entry = { .daddr = pc };
>>> +        g_array_append_val(dests, new_entry);
>>> +        data = &g_array_index(dests, DestData, dests->len - 1);
>>> +        g_assert(data->daddr == pc);
>>> +    }
>>> +
>>> +    data->dcount++;
>>> +    node->dest_count++;
>>> +
>>> +    g_mutex_unlock(&node->lock);
>>> +}
>>> +
>>> +/*
>>> + * At the start of each block we need to resolve two things:
>>> + *
>>> + *  - is last_pc == block_end, if not we had an early exit
>>> + *  - is start of block last_pc + insn width, if not we jumped
>>> + *
>>> + * Once those are dealt with we can instrument the rest of the
>>> + * instructions for their execution.
>>> + *
>>> + */
>>> +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
>>> +{
>>> +    uint64_t pc = qemu_plugin_tb_vaddr(tb);
>>> +    size_t insns = qemu_plugin_tb_n_insns(tb);
>>> +    struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0);
>>> +    struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1);
>>> +
>>> +    /*
>>> +     * check if we are executing linearly after the last block. We can
>>> +     * handle both early block exits and normal branches in the
>>> +     * callback if we hit it.
>>> +     */
>>> +    gpointer udata = GUINT_TO_POINTER(pc);
>>> +    qemu_plugin_register_vcpu_tb_exec_cond_cb(
>>> +        tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS,
>>> +        QEMU_PLUGIN_COND_NE, pc_after_block, pc, udata);
>>> +
>>> +    /*
>>> +     * Now we can set start/end for this block so the next block can
>>> +     * check where we are at. Do this on the first instruction and not
>>> +     * the TB so we don't get mixed up with above.
>>> +     */
>>> +    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
>>> +                                                      QEMU_PLUGIN_INLINE_STORE_U64,
>>> +                                                      end_block, qemu_plugin_insn_vaddr(last_insn));
>>> +    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn,
>>> +                                                      QEMU_PLUGIN_INLINE_STORE_U64,
>>> +                                                      pc_after_block,
>>> +                                                      qemu_plugin_insn_vaddr(last_insn) +
>>> +                                                      qemu_plugin_insn_size(last_insn));
>>> +
>>> +    for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
>>> +        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
>>> +        uint64_t ipc = qemu_plugin_insn_vaddr(insn);
>>> +        /*
>>> +         * If this is a potential branch point check if we could grab
>>> +         * the disassembly for it. If it is the last instruction
>>> +         * always create an entry.
>>> +         */
>>> +        NodeData *node = fetch_node(ipc, last_insn);
>>> +        if (node) {
>>> +            g_mutex_lock(&node->lock);
>>> +            if (!node->insn_disas) {
>>> +                node->insn_disas = qemu_plugin_insn_disas(insn);
>>> +            }
>>> +            if (!node->symbol) {
>>> +                node->symbol = qemu_plugin_insn_symbol(insn);
>>> +            }
>>> +            if (last_insn == insn) {
>>> +                node->last_count++;
>>> +            } else {
>>> +                node->mid_count++;
>>> +            }
>>> +            g_mutex_unlock(&node->lock);
>>> +        }
>>> +
>>> +        /* Store the PC of what we are about to execute */
>>> +        qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn,
>>> +                                                            QEMU_PLUGIN_INLINE_STORE_U64,
>>> +                                                            last_pc, ipc);
>>> +    }
>>> +}
>>> +
>>> +QEMU_PLUGIN_EXPORT
>>> +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
>>> +                        int argc, char **argv)
>>> +{
>>> +    for (int i = 0; i < argc; i++) {
>>> +        char *opt = argv[i];
>>> +        g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);
>>> +        if (g_strcmp0(tokens[0], "sort") == 0) {
>>> +            if (g_strcmp0(tokens[1], "hottest") == 0) {
>>> +                report = SORT_HOTDEST;
>>> +            } else if (g_strcmp0(tokens[1], "early") == 0) {
>>> +                report = SORT_EARLY;
>>> +            } else if (g_strcmp0(tokens[1], "popular") == 0) {
>>> +                report = SORT_POPDEST;
>>> +            } else {
>>> +                fprintf(stderr, "failed to parse: %s\n", tokens[1]);
>>> +                return -1;
>>> +            }
>>> +        } else {
>>> +            fprintf(stderr, "option parsing failed: %s\n", opt);
>>> +            return -1;
>>> +        }
>>> +    }
>>> +
>>> +    plugin_init();
>>> +
>>> +    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
>>> +    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
>>> +    return 0;
>>> +}
>>> diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
>>> index 98a89d5c40..ea81fde2b5 100644
>>> --- a/contrib/plugins/Makefile
>>> +++ b/contrib/plugins/Makefile
>>> @@ -29,6 +29,7 @@ NAMES += cache
>>>    NAMES += drcov
>>>    NAMES += ips
>>>    NAMES += stoptrigger
>>> +NAMES += cflow
>>>      ifeq ($(CONFIG_WIN32),y)
>>>    SO_SUFFIX := .dll
>