The mmap_lock is held around tb_gen_code. While the comment removed
below is correct that tb_gen_code drops the lock when it runs out
of memory, the lock is *not* dropped when an exception is raised
while reading guest code for translation.
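
To make the fix easier to follow, here is a minimal, self-contained
sketch of the pattern the patch adopts: after a siglongjmp out of
translation, release the lock in the landing pad if it is still held,
instead of asserting that it has already been dropped. This is not
QEMU code; fake_mmap_lock(), fake_have_mmap_lock() and
translate_block() are hypothetical stand-ins for
mmap_lock()/have_mmap_lock() and tb_gen_code().

#include <pthread.h>
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for QEMU's counted mmap_lock. */
static pthread_mutex_t fake_mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static int fake_mmap_lock_count;
static sigjmp_buf exec_env;

static void fake_mmap_lock(void)
{
    pthread_mutex_lock(&fake_mmap_mutex);
    fake_mmap_lock_count++;
}

static void fake_mmap_unlock(void)
{
    fake_mmap_lock_count--;
    pthread_mutex_unlock(&fake_mmap_mutex);
}

static bool fake_have_mmap_lock(void)
{
    return fake_mmap_lock_count > 0;
}

/* Stand-in for tb_gen_code(): may "fault" while reading code to translate. */
static void translate_block(bool fault_while_reading_code)
{
    if (fault_while_reading_code) {
        /* Exception path: longjmp out without releasing the lock. */
        siglongjmp(exec_env, 1);
    }
    /* Normal path: translation succeeds; the caller releases the lock. */
}

int main(void)
{
    if (sigsetjmp(exec_env, 0) == 0) {
        fake_mmap_lock();
        translate_block(true);     /* simulate a fault during translation */
        fake_mmap_unlock();
    } else {
        /*
         * Landing pad: the lock may or may not still be held, depending
         * on which path raised the exception, so drop it conditionally
         * rather than asserting !fake_have_mmap_lock().
         */
        if (fake_have_mmap_lock()) {
            fake_mmap_unlock();
        }
    }
    printf("lock depth after recovery: %d\n", fake_mmap_lock_count);
    return 0;
}

Build with e.g. "cc -pthread sketch.c"; the final lock depth is 0
whether or not the simulated fault fires, which mirrors the invariant
the patch enforces on the longjmp path.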
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cpu-exec.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 711859d4d4..7887af6f45 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -523,13 +523,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
     } else {
-        /*
-         * The mmap_lock is dropped by tb_gen_code if it runs out of
-         * memory.
-         */
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
@@ -936,7 +934,9 @@ int cpu_exec(CPUState *cpu)
 
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
--
2.34.1