From: Kirill Batuzov <batuzovk@ispras.ru>
To: qemu-devel@nongnu.org
Date: Thu, 2 Feb 2017 17:34:56 +0300
Message-Id: <1486046099-17726-19-git-send-email-batuzovk@ispras.ru>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1486046099-17726-1-git-send-email-batuzovk@ispras.ru>
References: <1486046099-17726-1-git-send-email-batuzovk@ispras.ru>
Subject: [Qemu-devel] [PATCH v2.1 18/21] softmmu: create helpers for vector loads
Cc: Peter Maydell, Peter Crosthwaite, Kirill Batuzov, Paolo Bonzini,
    Alex Bennée, Richard Henderson

Signed-off-by: Kirill Batuzov <batuzovk@ispras.ru>
---
 cputlb.c                  |   4 +
 softmmu_template_vector.h | 266 ++++++++++++++++++++++++++++++++++++++++++++++
 tcg/tcg.h                 |   5 +
 3 files changed, 275 insertions(+)
 create mode 100644 softmmu_template_vector.h

diff --git a/cputlb.c b/cputlb.c
index 6c39927..41c9a01 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -660,6 +660,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #define DATA_SIZE 8
 #include "softmmu_template.h"
 
+#define SHIFT 4
+#include "softmmu_template_vector.h"
+#undef MMUSUFFIX
+
 /* First set of helpers allows passing in of OI and RETADDR.  This makes
    them callable from other helpers.  */
 
diff --git a/softmmu_template_vector.h b/softmmu_template_vector.h
new file mode 100644
index 0000000..b286d65
--- /dev/null
+++ b/softmmu_template_vector.h
@@ -0,0 +1,266 @@
+/*
+ *  Software MMU support
+ *
+ * Generate helpers used by TCG for qemu_ld/st vector ops and code
+ * load functions.
+ *
+ * Included from target op helpers and exec.c.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+
+#define DATA_SIZE (1 << SHIFT)
+
+#if DATA_SIZE == 16
+#define SUFFIX v128
+#else
+#error unsupported data size
+#endif
+
+
+#ifdef SOFTMMU_CODE_ACCESS
+#define READ_ACCESS_TYPE MMU_INST_FETCH
+#define ADDR_READ addr_code
+#else
+#define READ_ACCESS_TYPE MMU_DATA_LOAD
+#define ADDR_READ addr_read
+#endif
+
+#define helper_te_ld_name glue(glue(helper_te_ld, SUFFIX), MMUSUFFIX)
+#define helper_te_st_name glue(glue(helper_te_st, SUFFIX), MMUSUFFIX)
+
+#ifndef SOFTMMU_CODE_ACCESS
+static inline void glue(io_read, SUFFIX)(CPUArchState *env,
+                                         CPUIOTLBEntry *iotlbentry,
+                                         target_ulong addr,
+                                         uintptr_t retaddr,
+                                         uint8_t *res)
+{
+    CPUState *cpu = ENV_GET_CPU(env);
+    hwaddr physaddr = iotlbentry->addr;
+    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    int i;
+
+    assert(0); /* Needs testing */
+
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    cpu->mem_io_pc = retaddr;
+    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+        cpu_io_recompile(cpu, retaddr);
+    }
+
+    cpu->mem_io_vaddr = addr;
+    for (i = 0; i < (1 << SHIFT); i += 8) {
+        memory_region_dispatch_read(mr, physaddr + i, (uint64_t *)(res + i),
+                                    8, iotlbentry->attrs);
+    }
+}
+#endif
+
+void helper_te_ld_name(CPUArchState *env, target_ulong addr,
+                       TCGMemOpIdx oi, uintptr_t retaddr, uint8_t *res)
+{
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    uintptr_t haddr;
+    int i;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                                 mmu_idx, retaddr);
+        }
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        iotlbentry = &env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr, res);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        target_ulong addr1, addr2;
+        uint8_t res1[DATA_SIZE * 2];
+        unsigned shift;
+    do_unaligned_access:
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                                 mmu_idx, retaddr);
+        }
+        addr1 = addr & ~(DATA_SIZE - 1);
+        addr2 = addr1 + DATA_SIZE;
+        /* Note the adjustment at the beginning of the function.
+           Undo that for the recursion.  */
+        helper_te_ld_name(env, addr1, oi, retaddr + GETPC_ADJ, res1);
+        helper_te_ld_name(env, addr2, oi, retaddr + GETPC_ADJ,
+                          res1 + DATA_SIZE);
+        shift = addr & (DATA_SIZE - 1);
+
+        for (i = 0; i < DATA_SIZE; i++) {
+            res[i] = res1[i + shift];
+        }
+        return;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    for (i = 0; i < DATA_SIZE; i++) {
+        res[i] = ((uint8_t *)haddr)[i];
+    }
+}
+
+#ifndef SOFTMMU_CODE_ACCESS
+
+static inline void glue(io_write, SUFFIX)(CPUArchState *env,
+                                          CPUIOTLBEntry *iotlbentry,
+                                          uint8_t *val,
+                                          target_ulong addr,
+                                          uintptr_t retaddr)
+{
+    CPUState *cpu = ENV_GET_CPU(env);
+    hwaddr physaddr = iotlbentry->addr;
+    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
+    int i;
+
+    assert(0); /* Needs testing */
+
+    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
+        cpu_io_recompile(cpu, retaddr);
+    }
+
+    cpu->mem_io_vaddr = addr;
+    cpu->mem_io_pc = retaddr;
+    for (i = 0; i < (1 << SHIFT); i += 8) {
+        memory_region_dispatch_write(mr, physaddr + i, *(uint64_t *)(val + i),
+                                     8, iotlbentry->attrs);
+    }
+}
+
+void helper_te_st_name(CPUArchState *env, target_ulong addr, uint8_t *val,
+                       TCGMemOpIdx oi, uintptr_t retaddr)
+{
+    unsigned mmu_idx = get_mmuidx(oi);
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    uintptr_t haddr;
+    int i;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
+        }
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        CPUIOTLBEntry *iotlbentry;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        iotlbentry = &env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always write data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        int i;
+    do_unaligned_access:
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
+        }
+        /* XXX: not efficient, but simple */
+        /* Note: relies on the fact that tlb_fill() does not remove the
+           previous page from the TLB cache.  */
+        for (i = DATA_SIZE - 1; i >= 0; i--) {
+            /* Note the adjustment at the beginning of the function.
+               Undo that for the recursion.  */
+            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val[i],
+                                            oi, retaddr + GETPC_ADJ);
+        }
+        return;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    for (i = 0; i < DATA_SIZE; i++) {
+        ((uint8_t *)haddr)[i] = val[i];
+    }
+}
+
+#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+
+#undef READ_ACCESS_TYPE
+#undef SHIFT
+#undef SUFFIX
+#undef DATA_SIZE
+#undef ADDR_READ
+#undef helper_te_ld_name
+#undef helper_te_st_name
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 63a83f9..8dee5c2 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -1330,6 +1330,11 @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr);
 
+void helper_te_ldv128_mmu(CPUArchState *env, target_ulong addr,
+                          TCGMemOpIdx oi, uintptr_t retaddr, uint8_t *res);
+void helper_te_stv128_mmu(CPUArchState *env, target_ulong addr, uint8_t *val,
+                          TCGMemOpIdx oi, uintptr_t retaddr);
+
 /* Temporary aliases until backends are converted.  */
 #ifdef TARGET_WORDS_BIGENDIAN
 # define helper_ret_ldsw_mmu helper_be_ldsw_mmu
-- 
2.1.4
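
For illustration only, not part of the patch: a minimal sketch of how a
target helper might call the two helpers declared in the tcg/tcg.h hunk
above. The caller names, the MO_ALIGN-only memop, and the register-copy
placeholders are assumptions for the example; the memop size bits for
128-bit accesses are defined elsewhere in this series, and `ra` stands
for the return address obtained via GETPC() in the outermost helper
(helper_te_ld/st_name subtract GETPC_ADJ themselves).

    /* Hypothetical callers, assuming QEMU 2.x-era softmmu conventions. */
    static void my_target_ld_v128(CPUArchState *env, target_ulong addr,
                                  unsigned mmu_idx, uintptr_t ra)
    {
        uint8_t buf[16];  /* DATA_SIZE == (1 << SHIFT) == 16 bytes */
        TCGMemOpIdx oi = make_memop_idx(MO_ALIGN, mmu_idx);

        /* Fills buf[] byte by byte in target memory order; TLB refill,
           unaligned and IO cases are handled inside, mirroring
           softmmu_template.h.  */
        helper_te_ldv128_mmu(env, addr, oi, ra, buf);
        /* ... copy buf into the guest vector register file ... */
    }

    static void my_target_st_v128(CPUArchState *env, target_ulong addr,
                                  uint8_t *val, unsigned mmu_idx,
                                  uintptr_t ra)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_ALIGN, mmu_idx);

        /* val points at 16 bytes in target memory order.  */
        helper_te_stv128_mmu(env, addr, val, oi, ra);
    }

Passing the vector through a byte pointer rather than by value keeps the
helper signature independent of any host vector type, at the cost of a
byte-wise copy on the fast path.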