CMD_TLBI_S2_IPA: As S1+S2 is not enabled, for now this can be the
same as CMD_TLBI_NH_VAA.
CMD_TLBI_S12_VMALL: Added new function to invalidate TLB by VMID.
For stage-1 only commands, add a check to throw CERROR_ILL if used
when stage-1 is not supported.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
Changes in v2:
- Add checks for stage-1 only commands
- Rename smmuv3_s1_range_inval to smmuv3_range_inval
---
 hw/arm/smmu-common.c         | 16 ++++++++++++
 hw/arm/smmuv3.c              | 49 +++++++++++++++++++++++++++++++------
 hw/arm/trace-events          |  4 ++-
 include/hw/arm/smmu-common.h |  1 +
 4 files changed, 63 insertions(+), 7 deletions(-)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 3191a008c6..e4b477af10 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -135,6 +135,16 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
 
     return SMMU_IOTLB_ASID(*iotlb_key) == asid;
 }
+
+static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
+                                         gpointer user_data)
+{
+    uint16_t vmid = *(uint16_t *)user_data;
+    SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
+
+    return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
+}
+
 static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
                                                    gpointer user_data)
 {
@@ -187,6 +197,12 @@ void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
     g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
 }
 
+inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid)
+{
+    trace_smmu_iotlb_inv_vmid(vmid);
+    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
+}
+
 /* VMSAv8-64 Translation */
 
 /**
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index f9c06723f9..8c76a48c8d 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1034,7 +1034,7 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
     }
 }
 
-static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
+static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
 {
     dma_addr_t end, addr = CMD_ADDR(cmd);
     uint8_t type = CMD_TYPE(cmd);
@@ -1059,7 +1059,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     }
 
     if (!tg) {
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
+        trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
         smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
         smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
         return;
@@ -1077,7 +1077,7 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
         uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
 
         num_pages = (mask + 1) >> granule;
-        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+        trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
         smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
         smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
         addr += mask + 1;
@@ -1211,12 +1211,22 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         {
             uint16_t asid = CMD_ASID(&cmd);
 
+            if (!STAGE1_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
             trace_smmuv3_cmdq_tlbi_nh_asid(asid);
             smmu_inv_notifiers_all(&s->smmu_state);
             smmu_iotlb_inv_asid(bs, asid);
             break;
         }
         case SMMU_CMD_TLBI_NH_ALL:
+            if (!STAGE1_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+            QEMU_FALLTHROUGH;
         case SMMU_CMD_TLBI_NSNH_ALL:
             trace_smmuv3_cmdq_tlbi_nh();
             smmu_inv_notifiers_all(&s->smmu_state);
@@ -1224,7 +1234,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             break;
         case SMMU_CMD_TLBI_NH_VAA:
         case SMMU_CMD_TLBI_NH_VA:
-            smmuv3_s1_range_inval(bs, &cmd);
+            if (!STAGE1_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+            smmuv3_range_inval(bs, &cmd);
+            break;
+        case SMMU_CMD_TLBI_S12_VMALL:
+        {
+            uint16_t vmid = CMD_VMID(&cmd);
+
+            if (!STAGE2_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
+            trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
+            smmu_inv_notifiers_all(&s->smmu_state);
+            smmu_iotlb_inv_vmid(bs, vmid);
+            break;
+        }
+        case SMMU_CMD_TLBI_S2_IPA:
+            if (!STAGE2_SUPPORTED(s)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+            /*
+             * As currently only either S1 or S2 is supported,
+             * we can reuse the same function for S2.
+             */
+            smmuv3_range_inval(bs, &cmd);
             break;
         case SMMU_CMD_TLBI_EL3_ALL:
         case SMMU_CMD_TLBI_EL3_VA:
@@ -1232,8 +1271,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         case SMMU_CMD_TLBI_EL2_ASID:
         case SMMU_CMD_TLBI_EL2_VA:
         case SMMU_CMD_TLBI_EL2_VAA:
-        case SMMU_CMD_TLBI_S12_VMALL:
-        case SMMU_CMD_TLBI_S2_IPA:
         case SMMU_CMD_ATC_INV:
         case SMMU_CMD_PRI_RESP:
         case SMMU_CMD_RESUME:
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 705104e58b..f8fdf1ca9f 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -12,6 +12,7 @@ smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, ui
smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
smmu_iotlb_inv_all(void) "IOTLB invalidate all"
smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d"
+smmu_iotlb_inv_vmid(uint16_t vmid) "IOTLB invalidate vmid=%d"
smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
smmu_iotlb_lookup_hit(uint16_t asid, uint16_t vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
@@ -45,9 +46,10 @@ smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x - end=0x%x"
smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x"
smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)"
-smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d"
+smmuv3_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d"
smmuv3_cmdq_tlbi_nh(void) ""
smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d"
+smmuv3_cmdq_tlbi_s12_vmid(uint16_t vmid) "vmid=%d"
smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x"
smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s"
smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s"
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index c415e8d853..d8b458379e 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -186,6 +186,7 @@ SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
                                 uint8_t tg, uint8_t level);
 void smmu_iotlb_inv_all(SMMUState *s);
 void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid);
+void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid);
 void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                          uint8_t tg, uint64_t num_pages, uint8_t ttl);
 
--
2.39.2.637.g21b0678d19-goog
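
For context on the GLib idiom the patch builds on: smmu_iotlb_inv_vmid()
leans on g_hash_table_foreach_remove(), which walks the table and deletes
every entry for which a caller-supplied predicate returns TRUE. Below is a
minimal standalone sketch of that pattern; KeyStub and the hash/equal
helpers are simplified stand-ins for QEMU's SMMUIOTLBKey machinery, not
the real types.

/* sketch.c: predicate-based removal, as used by smmu_iotlb_inv_vmid().
 * Build: gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 * KeyStub is a simplified stand-in for QEMU's SMMUIOTLBKey. */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint16_t asid;
    uint16_t vmid;
    uint64_t iova;
} KeyStub;

static guint key_hash(gconstpointer k)
{
    const KeyStub *key = k;
    return g_int64_hash(&key->iova) ^ key->asid ^ ((guint)key->vmid << 16);
}

static gboolean key_equal(gconstpointer a, gconstpointer b)
{
    const KeyStub *ka = a, *kb = b;
    return ka->asid == kb->asid && ka->vmid == kb->vmid && ka->iova == kb->iova;
}

/* GHRFunc predicate: return TRUE for entries that must be dropped. */
static gboolean remove_by_vmid(gpointer key, gpointer value, gpointer user_data)
{
    uint16_t vmid = *(uint16_t *)user_data;
    return ((KeyStub *)key)->vmid == vmid;
}

int main(void)
{
    /* Keys and values are heap-allocated; g_free reclaims them on removal. */
    GHashTable *tlb = g_hash_table_new_full(key_hash, key_equal, g_free, g_free);
    uint16_t vmids[] = { 1, 1, 2 };

    for (guint i = 0; i < 3; i++) {
        KeyStub *k = g_new0(KeyStub, 1);
        k->asid = 7;
        k->vmid = vmids[i];
        k->iova = 0x1000 * (i + 1);
        g_hash_table_insert(tlb, k, g_strdup("cached translation"));
    }

    uint16_t vmid = 1;
    guint removed = g_hash_table_foreach_remove(tlb, remove_by_vmid, &vmid);
    printf("removed %u, remaining %u\n", removed, g_hash_table_size(tlb));

    g_hash_table_destroy(tlb);
    return 0;
}

Running it prints "removed 2, remaining 1": the two vmid=1 entries are
flushed and the vmid=2 entry survives, which is the per-VMID invalidation
CMD_TLBI_S12_VMALL performs on the emulated IOTLB.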
Hi Mostafa,

On 2/26/23 23:06, Mostafa Saleh wrote:
> CMD_TLBI_S2_IPA: As S1+S2 is not enabled, for now this can be the
> same as CMD_TLBI_NH_VAA.
>
> CMD_TLBI_S12_VMALL: Added new function to invalidate TLB by VMID.
>
> For stage-1 only commands, add a check to to throw CERROR_ILL if used
s/to to/to
> when stage-1 is not supported.
>
> Signed-off-by: Mostafa Saleh <smostafa@google.com>

[...]

> @@ -1211,12 +1211,22 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>          {
>              uint16_t asid = CMD_ASID(&cmd);
>
> +            if (!STAGE1_SUPPORTED(s)) {
> +                cmd_error = SMMU_CERROR_ILL;

Well, looking further, it is not said explicitly that this should return
SMMU_CERROR_ILL. Maybe you should mark it as a guest error, because we
do not expect a guest to send such a command when S1 is not supported?

> +                break;
> +            }
> +
>              trace_smmuv3_cmdq_tlbi_nh_asid(asid);
>              smmu_inv_notifiers_all(&s->smmu_state);
>              smmu_iotlb_inv_asid(bs, asid);
>              break;
>          }
>          case SMMU_CMD_TLBI_NH_ALL:
> +            if (!STAGE1_SUPPORTED(s)) {
> +                cmd_error = SMMU_CERROR_ILL;
> +                break;

There is a VMID field. Can't this be used in S2 mode as well?

> +            }
> +            QEMU_FALLTHROUGH;
>          case SMMU_CMD_TLBI_NSNH_ALL:
>              trace_smmuv3_cmdq_tlbi_nh();
>              smmu_inv_notifiers_all(&s->smmu_state);

[...]

Thanks

Eric
Hi Eric,

On Mon, Mar 20, 2023 at 05:51:07PM +0100, Eric Auger wrote:
> Hi Mostafa,
>
> On 2/26/23 23:06, Mostafa Saleh wrote:
> > For stage-1 only commands, add a check to to throw CERROR_ILL if used
> s/to to/to

Will do.

> > +            if (!STAGE1_SUPPORTED(s)) {
> > +                cmd_error = SMMU_CERROR_ILL;
> Well, looking further, it is not said explicitly that this should return
> SMMU_CERROR_ILL. Maybe you should mark it as a guest error, because we
> do not expect a guest to send such a command when S1 is not supported?

I can add a check after the switch for SMMU_CERROR_ILL to log a guest
error.

> >          case SMMU_CMD_TLBI_NH_ALL:
> > +            if (!STAGE1_SUPPORTED(s)) {
> > +                cmd_error = SMMU_CERROR_ILL;
> > +                break;
> There is a VMID field. Can't this be used in S2 mode as well?

According to the user manual, section "4.4.2 TLB invalidation of stage 1",
CMD_TLBI_NH_ALL causes CERROR_ILL if stage-1 is not supported.

Thanks,
Mostafa
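
The post-switch guest-error check Mostafa mentions could take roughly the
shape below. This is a standalone sketch of the control flow only — the
enum, command names, and fprintf() are stand-ins for QEMU's real
definitions, where the report would go through
qemu_log_mask(LOG_GUEST_ERROR, ...) instead — not the code that
eventually landed.

/* err_sketch.c: one possible shape for "log a guest error after the
 * switch". All names are stand-ins, not QEMU's. */
#include <stdio.h>

typedef enum { CERROR_NONE, CERROR_ILL } CmdError;
enum { CMD_TLBI_NH_ASID, CMD_TLBI_NSNH_ALL };

static int stage1_supported;        /* 0: pretend this SMMU is stage-2 only */

static CmdError consume_one(int type)
{
    CmdError cmd_error = CERROR_NONE;

    switch (type) {
    case CMD_TLBI_NH_ASID:          /* stage-1-only command */
        if (!stage1_supported) {
            cmd_error = CERROR_ILL;
            break;
        }
        /* ... perform the stage-1 invalidation ... */
        break;
    case CMD_TLBI_NSNH_ALL:         /* legal in any configuration */
        break;
    default:
        cmd_error = CERROR_ILL;
        break;
    }

    /*
     * Single check after the switch: every branch that set CERROR_ILL
     * is reported as a guest error in one place, instead of logging
     * separately in each case label.
     */
    if (cmd_error == CERROR_ILL) {
        fprintf(stderr, "guest error: illegal command type %d\n", type);
    }
    return cmd_error;
}

int main(void)
{
    consume_one(CMD_TLBI_NH_ASID);  /* logged: stage 1 not supported */
    consume_one(CMD_TLBI_NSNH_ALL); /* silent */
    return 0;
}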