Recent hw/riscv/boot.c changes caused a regression in a use case with
the Xvisor hypervisor: running a 32-bit QEMU guest with '-kernel'
stopped working. The reason seems to be that Xvisor uses 64 bits to
encode the guest's 32-bit addresses, and load_elf_ram_sym() is
sign-extending the result with '1's [1].
Add a translate_fn() callback, called by load_elf_ram_sym(), that
returns only the lower 32 bits of the address when we're running a
32-bit CPU.
[1] https://lists.gnu.org/archive/html/qemu-devel/2023-01/msg02281.html
Suggested-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Suggested-by: Bin Meng <bmeng.cn@gmail.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
---
hw/riscv/boot.c | 20 +++++++++++++++++++-
hw/riscv/microchip_pfsoc.c | 4 ++--
hw/riscv/opentitan.c | 3 ++-
hw/riscv/sifive_e.c | 3 ++-
hw/riscv/sifive_u.c | 4 ++--
hw/riscv/spike.c | 2 +-
hw/riscv/virt.c | 4 ++--
include/hw/riscv/boot.h | 1 +
target/riscv/cpu_bits.h | 1 +
9 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index e868fb6ade..0fd39df7f3 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -213,7 +213,24 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
}
}
+static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
+{
+ RISCVHartArrayState *harts = opaque;
+
+ /*
+ * For 32 bit CPUs, kernel_load_base is sign-extended (i.e.
+ * it can be padded with '1's) if the hypervisor is using
+ * 64 bit addresses with 32 bit guests.
+ */
+ if (riscv_is_32bit(harts)) {
+ return extract64(addr, 0, RV32_KERNEL_ADDR_LEN);
+ }
+
+ return addr;
+}
+
target_ulong riscv_load_kernel(MachineState *machine,
+ RISCVHartArrayState *harts,
target_ulong kernel_start_addr,
bool load_initrd,
symbol_fn_t sym_cb)
@@ -231,7 +248,8 @@ target_ulong riscv_load_kernel(MachineState *machine,
* the (expected) load address load address. This allows kernels to have
* separate SBI and ELF entry points (used by FreeBSD, for example).
*/
- if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
+ if (load_elf_ram_sym(kernel_filename, NULL,
+ translate_kernel_address, NULL,
NULL, &kernel_load_base, NULL, NULL, 0,
EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
kernel_entry = kernel_load_base;
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
index c45023a2b1..b7e171b605 100644
--- a/hw/riscv/microchip_pfsoc.c
+++ b/hw/riscv/microchip_pfsoc.c
@@ -629,8 +629,8 @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
- true, NULL);
+ kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
+ kernel_start_addr, true, NULL);
/* Compute the fdt load address in dram */
fdt_load_addr = riscv_load_fdt(memmap[MICROCHIP_PFSOC_DRAM_LO].base,
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index f6fd9725a5..1404a52da0 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -101,7 +101,8 @@ static void opentitan_board_init(MachineState *machine)
}
if (machine->kernel_filename) {
- riscv_load_kernel(machine, memmap[IBEX_DEV_RAM].base, false, NULL);
+ riscv_load_kernel(machine, &s->soc.cpus,
+ memmap[IBEX_DEV_RAM].base, false, NULL);
}
}
diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index 6835d1c807..04939b60c3 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -114,7 +114,8 @@ static void sifive_e_machine_init(MachineState *machine)
memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory);
if (machine->kernel_filename) {
- riscv_load_kernel(machine, memmap[SIFIVE_E_DEV_DTIM].base,
+ riscv_load_kernel(machine, &s->soc.cpus,
+ memmap[SIFIVE_E_DEV_DTIM].base,
false, NULL);
}
}
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index ccad386920..b0b3e6f03a 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -598,8 +598,8 @@ static void sifive_u_machine_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
- true, NULL);
+ kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
+ kernel_start_addr, true, NULL);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index 91bf194ec1..3c0ac916c0 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -305,7 +305,7 @@ static void spike_board_init(MachineState *machine)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
+ kernel_entry = riscv_load_kernel(machine, &s->soc[0], kernel_start_addr,
true, htif_symbol_callback);
} else {
/*
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index e374b58f89..cf64da65bf 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -1281,8 +1281,8 @@ static void virt_machine_done(Notifier *notifier, void *data)
kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
firmware_end_addr);
- kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
- true, NULL);
+ kernel_entry = riscv_load_kernel(machine, &s->soc[0],
+ kernel_start_addr, true, NULL);
} else {
/*
* If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index cbd131bad7..bc9faed397 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -44,6 +44,7 @@ target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr firmware_load_addr,
symbol_fn_t sym_cb);
target_ulong riscv_load_kernel(MachineState *machine,
+ RISCVHartArrayState *harts,
target_ulong firmware_end_addr,
bool load_initrd,
symbol_fn_t sym_cb);
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 8b0d7e20ea..8fcaeae342 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -751,6 +751,7 @@ typedef enum RISCVException {
#define MENVCFG_STCE (1ULL << 63)
/* For RV32 */
+#define RV32_KERNEL_ADDR_LEN 32
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)
--
2.39.0
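For illustration, a standalone sketch of what the new translate_fn() does to a
sign-extended load address. The 0x80400000 base is a hypothetical RV32 kernel
address (not taken from the report in [1]), and extract64_sketch() merely
mimics QEMU's extract64() for this demo:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's extract64(value, start, length). */
static uint64_t extract64_sketch(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /*
     * A value like the one load_elf_ram_sym() can report for a 32-bit
     * ELF: the (hypothetical) RV32 base 0x80400000 with bits 63..32
     * padded with '1's.
     */
    uint64_t kernel_load_base = 0xffffffff80400000ULL;

    /* What the patch's translate_kernel_address() returns for RV32 harts. */
    uint64_t translated = extract64_sketch(kernel_load_base, 0, 32);

    printf("loader value: 0x%016llx\n", (unsigned long long)kernel_load_base);
    printf("translated:   0x%016llx\n", (unsigned long long)translated);
    return 0;
}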
On Mon, Jan 16, 2023 at 8:30 PM Daniel Henrique Barboza
<dbarboza@ventanamicro.com> wrote:
>
> [...]
> +static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
> +{
> + RISCVHartArrayState *harts = opaque;
> +
> + /*
> + * For 32 bit CPUs, kernel_load_base is sign-extended (i.e.
> + * it can be padded with '1's) if the hypervisor is using
> + * 64 bit addresses with 32 bit guests.
This comment is not accurate. It has nothing to do with the hypervisor
using a 64-bit address. It's the ELF loader that is sign-extending the
32-bit address.
> [...]
Regards,
Bin
On 16/1/23 13:29, Daniel Henrique Barboza wrote:
> [...]
> +static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
> +{
> + RISCVHartArrayState *harts = opaque;
> +
> + /*
> + * For 32 bit CPUs, kernel_load_base is sign-extended (i.e.
> + * it can be padded with '1's) if the hypervisor is using
> + * 64 bit addresses with 32 bit guests.
> + */
> + if (riscv_is_32bit(harts)) {
Maybe move the comment within the if() and add " so remove the sign
extension by truncating to 32-bit".
> + return extract64(addr, 0, RV32_KERNEL_ADDR_LEN);
For 32-bit maybe a definition is not necessary; I asked before because
you used 24-bit in the previous version. As the maintainer prefers :)
> [...]
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
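For reference, a sketch of how the hook could look after folding in Philippe's
two remarks above (comment moved inside the if() and a plain 32 instead of the
RV32_KERNEL_ADDR_LEN macro) plus Bin Meng's earlier point that it is the ELF
loader, not the hypervisor, doing the sign extension. It assumes the same
includes as hw/riscv/boot.c and shows only the suggested shape, not necessarily
the wording that landed upstream:

static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    RISCVHartArrayState *harts = opaque;

    if (riscv_is_32bit(harts)) {
        /*
         * The ELF loader can sign-extend the 32-bit load address,
         * so remove the sign extension by truncating to 32 bits.
         */
        return extract64(addr, 0, 32);
    }

    return addr;
}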
On Mon, Jan 16, 2023 at 8:37 PM Philippe Mathieu-Daudé
<philmd@linaro.org> wrote:
>
> [...]
Isn't the problem in the ELF loader?
Why does it return a 64-bit sign-extended address given the 32-bit ELF image?
Regards,
Bin
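A standalone illustration of the behaviour Bin is asking about: widening a
32-bit value through a signed 32-bit type replicates bit 31 into the upper
half on the usual two's-complement targets. The 0x80400000 base is
hypothetical, and this only mimics the effect rather than quoting QEMU's ELF
loader code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t paddr = 0x80400000u;   /* hypothetical RV32 load base, bit 31 set */

    /* Converting through a signed 32-bit type before widening to 64 bits
     * copies bit 31 into bits 63..32, i.e. the address gets "padded with
     * '1's" instead of being zero-extended. */
    uint64_t sign_extended = (uint64_t)(int32_t)paddr;
    uint64_t zero_extended = (uint64_t)paddr;

    printf("sign-extended: 0x%016llx\n", (unsigned long long)sign_extended);
    printf("zero-extended: 0x%016llx\n", (unsigned long long)zero_extended);
    return 0;
}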
On 1/16/23 09:37, Philippe Mathieu-Daudé wrote:
> On 16/1/23 13:29, Daniel Henrique Barboza wrote:
>> [...]
>> +static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
>> +{
>> + RISCVHartArrayState *harts = opaque;
>> +
>> + /*
>> + * For 32 bit CPUs, kernel_load_base is sign-extended (i.e.
>> + * it can be padded with '1's) if the hypervisor is using
>> + * 64 bit addresses with 32 bit guests.
>> + */
>> + if (riscv_is_32bit(harts)) {
>
> Maybe move the comment within the if() and add " so remove the sign
> extension by truncating to 32-bit".
>
>> + return extract64(addr, 0, RV32_KERNEL_ADDR_LEN);
>
> For 32-bit maybe a definition is not necessary; I asked before because
> you used 24-bit in the previous version. As the maintainer prefers :)
That was unintentional. I missed an 'f' in that 0x0fffffff, which I noticed only
now when doing this version. It's curious because Alistair mentioned that
the patch apparently solved the bug ....
I don't mind creating a macro for the 32 bit value. If we decide it's unneeded
I can remove it and just put a '32' there. I'll also make the comment change
you mentioned above.
Thanks,
Daniel
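For context on the mask mix-up mentioned above: 0x0fffffff only keeps the low
28 bits, so with a hypothetical RV32 base of 0x80400000 it drops the top
nibble of the address, while a full 32-bit mask (equivalent to
extract64(addr, 0, 32)) preserves it. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical sign-extended RV32 kernel base, as in the regression. */
    uint64_t addr = 0xffffffff80400000ULL;

    printf("& 0x0fffffff: 0x%08llx\n",       /* 0x00400000: top bits lost  */
           (unsigned long long)(addr & 0x0fffffff));
    printf("& 0xffffffff: 0x%08llx\n",       /* 0x80400000: correct result */
           (unsigned long long)(addr & 0xffffffff));
    return 0;
}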
On Mon, Jan 16, 2023 at 10:46 PM Daniel Henrique Barboza
<dbarboza@ventanamicro.com> wrote:
> [...]
> >> + return extract64(addr, 0, RV32_KERNEL_ADDR_LEN);
> >
> > For 32-bit maybe a definition is not necessary; I asked before because
> > you used 24-bit in the previous version. As the maintainer prefers :)
>
> That was unintentional. I missed an 'f' in that 0x0fffffff, which I noticed only
> now when doing this version. It's curious because Alistair mentioned that
> the patch apparently solved the bug ....
I never tested it, I'm not sure if this solves the problem or not.
This patch needs to be merged *before* the initrd patch (patch 1 of
this series) to avoid breaking users.
>
> I don't mind creating a macro for the 32 bit value. If we decide it's unneeded
> I can remove it and just put a '32' there. I'll also make the comment change
> you mentioned above.
I think 32 is fine, I don't think we need a macro
Alistair
On 1/18/23 19:45, Alistair Francis wrote:
> [...]
> I never tested it, I'm not sure if this solves the problem or not.
>
> This patch needs to be merged *before* the initrd patch (patch 1 of
> this series) to avoid breaking users.
Makes sense. I'll change it in v9.
Daniel