Hi Tao,
On 10/12/25 8:06 AM, Tao Tang wrote:
According to the Arm architecture, SMMU-originated memory accesses,
such as fetching commands or writing events for a secure stream, must
target the Secure Physical Address (PA) space. The existing model sends
all DMA to the global non-secure address_space_memory.
This patch introduces the infrastructure to differentiate between secure
and non-secure memory accesses. Firstly, SMMU_SEC_SID_S is added in
SMMUSecSID enum to represent the secure context. Then a weak global
symbol, arm_secure_address_space, is added, which can be provided by the
machine model to represent the Secure PA space.
A new helper, smmu_get_address_space(), selects the target address
space based on SEC_SID. All internal DMA calls
(dma_memory_read/write) will be updated to use this helper in follow-up
patches.
Signed-off-by: Tao Tang <[email protected]>
---
hw/arm/smmu-common.c | 8 ++++++++
hw/arm/virt.c | 5 +++++
include/hw/arm/smmu-common.h | 27 +++++++++++++++++++++++++++
3 files changed, 40 insertions(+)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 62a7612184..24db448683 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -30,6 +30,14 @@
#include "hw/arm/smmu-common.h"
#include "smmu-internal.h"
+/* Global state for secure address space availability */
+bool arm_secure_as_available;
+
+void smmu_enable_secure_address_space(void)
+{
+ arm_secure_as_available = true;
+}
+
/* IOTLB Management */
static guint smmu_iotlb_key_hash(gconstpointer v)
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 175023897a..83dc62a095 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -92,6 +92,8 @@
#include "hw/cxl/cxl_host.h"
#include "qemu/guest-random.h"
+AddressSpace arm_secure_address_space;
+
static GlobalProperty arm_virt_compat[] = {
{ TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "48" },
};
@@ -2257,6 +2259,9 @@ static void machvirt_init(MachineState *machine)
memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory",
UINT64_MAX);
memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1);
+ address_space_init(&arm_secure_address_space, secure_sysmem,
+ "secure-memory-space");
+ smmu_enable_secure_address_space();
}
firmware_loaded = virt_firmware_init(vms, sysmem,
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index b0dae18a62..d54558f94b 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -43,9 +43,36 @@
/* StreamID Security state */
typedef enum SMMUSecSID {
SMMU_SEC_SID_NS = 0,
+ SMMU_SEC_SID_S,
SMMU_SEC_SID_NUM,
} SMMUSecSID;
+extern AddressSpace __attribute__((weak)) arm_secure_address_space;
+extern bool arm_secure_as_available;
+void smmu_enable_secure_address_space(void);
+
+/*
+ * Return the address space corresponding to the SEC_SID.
+ * If SEC_SID is Secure, but secure address space is not available,
+ * return NULL and print a warning message.
+ */
+static inline AddressSpace *smmu_get_address_space(SMMUSecSID sec_sid)
+{
+ switch (sec_sid) {
+ case SMMU_SEC_SID_NS:
+ return &address_space_memory;
+ case SMMU_SEC_SID_S:
+ if (!arm_secure_as_available || arm_secure_address_space.root == NULL) {
+ printf("Secure address space requested but not available\n");
+ return NULL;
+ }
+ return &arm_secure_address_space;
+ default:
+ printf("Unknown SEC_SID value %d\n", sec_sid);
+ return NULL;
+ }
+}
+
/*
* Page table walk error types
*/
I ran into the same issue, when adding Granule Protection Check to the
SMMU, for RME support. It requires access to secure memory, where
Granule Protection Table is kept, and thus, access secure address space.
After talking with Richard and Philippe, I was pointed to a better
approach. Similar to how Arm CPUs handle this, boards (virt & sbsa-ref) are
simply passing pointers to MemoryRegion for global and secure memory.
Then, the SMMU can create its own address spaces, based on those regions.
It's clean, does not require any weak variable, and mimics what is
already done for CPUs. Please see the two patches attached.
The first one defines properties and passes memory regions from boards to
the SMMU. The second one replaces the global address spaces with SMMU-owned ones.
I'll send patch 1 as its own series, and you can take inspiration from
patch 2 for this series. SMMU unit tests will need to be modified to be
passed the memory regions also.
Regards,
Pierrick
From 918a003547e8f31b572726123bb8bf4f8466db0c Mon Sep 17 00:00:00 2001
From: Pierrick Bouvier <[email protected]>
Date: Thu, 11 Dec 2025 12:33:43 -0800
Subject: [PATCH 1/2] hw/arm/smmu: add memory regions as property for an SMMU
instance
This will be used to access non-secure and secure memory. Secure support
and Granule Protection Check (for RME) for SMMU need to access secure
memory.
As well, it allows to remove usage of global address_space_memory,
allowing different SMMU instances to have a specific view of memory.
Signed-off-by: Pierrick Bouvier <[email protected]>
---
include/hw/arm/smmu-common.h | 4 ++++
hw/arm/sbsa-ref.c | 16 ++++++++++++----
hw/arm/smmu-common.c | 24 ++++++++++++++++++++++++
hw/arm/virt.c | 16 +++++++++++-----
4 files changed, 51 insertions(+), 9 deletions(-)
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index a6bdb67a983..0f08ae080c9 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -227,6 +227,10 @@ struct SMMUState {
uint8_t bus_num;
PCIBus *primary_bus;
bool smmu_per_bus; /* SMMU is specific to the primary_bus */
+ MemoryRegion *memory;
+ AddressSpace as_memory;
+ MemoryRegion *secure_memory;
+ AddressSpace as_secure_memory;
};
struct SMMUBaseClass {
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index 45d2e3e946d..840b1a216f4 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -616,7 +616,9 @@ static void create_xhci(const SBSAMachineState *sms)
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(sms->gic, irq));
}
-static void create_smmu(const SBSAMachineState *sms, PCIBus *bus)
+static void create_smmu(const SBSAMachineState *sms, PCIBus *bus,
+ MemoryRegion *sysmem,
+ MemoryRegion *secure_sysmem)
{
hwaddr base = sbsa_ref_memmap[SBSA_SMMU].base;
int irq = sbsa_ref_irqmap[SBSA_SMMU];
@@ -628,6 +630,10 @@ static void create_smmu(const SBSAMachineState *sms, PCIBus *bus)
object_property_set_str(OBJECT(dev), "stage", "nested", &error_abort);
object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
&error_abort);
+ object_property_set_link(OBJECT(dev), "memory", OBJECT(sysmem),
+ &error_abort);
+ object_property_set_link(OBJECT(dev), "secure-memory", OBJECT(secure_sysmem),
+ &error_abort);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
for (i = 0; i < NUM_SMMU_IRQS; i++) {
@@ -636,7 +642,9 @@ static void create_smmu(const SBSAMachineState *sms, PCIBus *bus)
}
}
-static void create_pcie(SBSAMachineState *sms)
+static void create_pcie(SBSAMachineState *sms,
+ MemoryRegion *sysmem,
+ MemoryRegion *secure_sysmem)
{
hwaddr base_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].base;
hwaddr size_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].size;
@@ -692,7 +700,7 @@ static void create_pcie(SBSAMachineState *sms)
pci_create_simple(pci->bus, -1, "bochs-display");
- create_smmu(sms, pci->bus);
+ create_smmu(sms, pci->bus, sysmem, secure_sysmem);
}
static void *sbsa_ref_dtb(const struct arm_boot_info *binfo, int *fdt_size)
@@ -831,7 +839,7 @@ static void sbsa_ref_init(MachineState *machine)
create_xhci(sms);
- create_pcie(sms);
+ create_pcie(sms, sysmem, secure_sysmem);
create_secure_ec(secure_sysmem);
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 66367adc2a4..5fbfe825fd0 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -1171,6 +1171,12 @@ static void smmu_base_realize(DeviceState *dev, Error **errp)
return;
}
+ g_assert(s->memory);
+ address_space_init(&s->as_memory, s->memory, "memory");
+ if (s->secure_memory) {
+ address_space_init(&s->as_secure_memory, s->secure_memory, "secure-memory");
+ }
+
/*
* We only allow default PCIe Root Complex(pcie.0) or pxb-pcie based extra
* root complexes to be associated with SMMU.
@@ -1235,10 +1241,28 @@ static void smmu_base_class_init(ObjectClass *klass, const void *data)
rc->phases.exit = smmu_base_reset_exit;
}
+static void smmu_base_instance_init(Object *obj)
+{
+ SMMUState *s = ARM_SMMU(obj);
+
+ object_property_add_link(obj, "memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&s->memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+
+ object_property_add_link(obj, "secure-memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&s->secure_memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+}
+
static const TypeInfo smmu_base_info = {
.name = TYPE_ARM_SMMU,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(SMMUState),
+ .instance_init = smmu_base_instance_init,
.class_data = NULL,
.class_size = sizeof(SMMUBaseClass),
.class_init = smmu_base_class_init,
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 5d205eff3a1..d446c3349e9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1514,8 +1514,9 @@ static void create_smmuv3_dev_dtb(VirtMachineState *vms,
0x0, vms->iommu_phandle, 0x0, 0x10000);
}
-static void create_smmu(const VirtMachineState *vms,
- PCIBus *bus)
+static void create_smmu(const VirtMachineState *vms, PCIBus *bus,
+ MemoryRegion *sysmem,
+ MemoryRegion *secure_sysmem)
{
VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
int irq = vms->irqmap[VIRT_SMMU];
@@ -1549,6 +1550,10 @@ static void create_smmu(const VirtMachineState *vms,
object_property_set_str(OBJECT(dev), "stage", stage, &error_fatal);
object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus),
&error_abort);
+ object_property_set_link(OBJECT(dev), "memory", OBJECT(sysmem),
+ &error_abort);
+ object_property_set_link(OBJECT(dev), "secure-memory", OBJECT(secure_sysmem),
+ &error_abort);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
for (i = 0; i < NUM_SMMU_IRQS; i++) {
@@ -1587,7 +1592,8 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
}
}
-static void create_pcie(VirtMachineState *vms)
+static void create_pcie(VirtMachineState *vms,
+ MemoryRegion *sysmem, MemoryRegion *secure_sysmem)
{
hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size;
@@ -1706,7 +1712,7 @@ static void create_pcie(VirtMachineState *vms)
switch (vms->iommu) {
case VIRT_IOMMU_SMMUV3:
- create_smmu(vms, vms->bus);
+ create_smmu(vms, vms->bus, sysmem, secure_sysmem);
if (!vms->default_bus_bypass_iommu) {
qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map",
0x0, vms->iommu_phandle, 0x0, 0x10000);
@@ -2520,7 +2526,7 @@ static void machvirt_init(MachineState *machine)
create_rtc(vms);
- create_pcie(vms);
+ create_pcie(vms, sysmem, secure_sysmem);
create_cxl_host_reg_region(vms);
if (aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
--
2.47.3
From f6d8e41c02caaf0b9af73dc54de48d7b97ae1354 Mon Sep 17 00:00:00 2001
From: Pierrick Bouvier <[email protected]>
Date: Thu, 11 Dec 2025 13:04:25 -0800
Subject: [PATCH 2/2] hw/arm/smmu: use SMMU address spaces to access memory
Signed-off-by: Pierrick Bouvier <[email protected]>
---
include/hw/arm/smmu-common.h | 49 ++++++++++++++++++------------------
hw/arm/smmu-common.c | 21 ++++++++--------
hw/arm/smmuv3.c | 23 +++++++++--------
3 files changed, 49 insertions(+), 44 deletions(-)
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index 0f08ae080c9..3ee853ccdd9 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -69,30 +69,6 @@ extern AddressSpace __attribute__((weak)) arm_secure_address_space;
extern bool arm_secure_as_available;
void smmu_enable_secure_address_space(void);
-/*
- * Return the address space corresponding to the SEC_SID.
- * If SEC_SID is Secure, but secure address space is not available,
- * return NULL and print a warning message.
- */
-static inline AddressSpace *smmu_get_address_space(SMMUSecSID sec_sid)
-{
- switch (sec_sid) {
- case SMMU_SEC_SID_NS:
- return &address_space_memory;
- case SMMU_SEC_SID_R:
- return &address_space_memory;
- case SMMU_SEC_SID_S:
- if (!arm_secure_as_available || arm_secure_address_space.root == NULL) {
- printf("Secure address space requested but not available\n");
- return NULL;
- }
- return &arm_secure_address_space;
- default:
- printf("Unknown SEC_SID value %d\n", sec_sid);
- return NULL;
- }
-}
-
/*
* Page table walk error types
*/
@@ -243,6 +219,31 @@ struct SMMUBaseClass {
};
+/*
+ * Return the address space corresponding to the SEC_SID.
+ * If SEC_SID is Secure, but secure address space is not available,
+ * return NULL and print a warning message.
+ */
+static inline AddressSpace *smmu_get_address_space(struct SMMUState *bs,
+ SMMUSecSID sec_sid)
+{
+ switch (sec_sid) {
+ case SMMU_SEC_SID_NS:
+ return &bs->as_memory;
+ case SMMU_SEC_SID_R:
+ return &bs->as_memory;
+ case SMMU_SEC_SID_S:
+ if (!bs->secure_memory) {
+ printf("Secure address space requested but not available\n");
+ return NULL;
+ }
+ return &bs->as_secure_memory;
+ default:
+ printf("Unknown SEC_SID value %d\n", sec_sid);
+ return NULL;
+ }
+}
+
#define TYPE_ARM_SMMU "arm-smmu"
OBJECT_DECLARE_TYPE(SMMUState, SMMUBaseClass, ARM_SMMU)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 5fbfe825fd0..d6aba95cfd9 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -405,13 +405,13 @@ void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
* get_pte - Get the content of a page table entry located at
* @base_addr[@index]
*/
-static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
- SMMUPTWEventInfo *info, SMMUSecSID sec_sid)
+static int get_pte(SMMUState *bs, dma_addr_t baseaddr, uint32_t index,
+ uint64_t *pte, SMMUPTWEventInfo *info, SMMUSecSID sec_sid)
{
int ret;
dma_addr_t addr = baseaddr + index * sizeof(*pte);
MemTxAttrs attrs = smmu_get_txattrs(sec_sid);
- AddressSpace *as = smmu_get_address_space(sec_sid);
+ AddressSpace *as = smmu_get_address_space(bs, sec_sid);
if (!as) {
info->type = SMMU_PTW_ERR_WALK_EABT;
info->addr = addr;
@@ -570,7 +570,7 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
/* Use NS if forced by previous NSTable=1 or current nscfg */
int current_ns = forced_ns || nscfg;
SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
- if (get_pte(baseaddr, offset, &pte, info, sec_sid)) {
+ if (get_pte(bs, baseaddr, offset, &pte, info, sec_sid)) {
goto error;
}
trace_smmu_ptw_level(stage, level, iova, subpage_size,
@@ -658,7 +658,7 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
}
tlbe->sec_sid = SMMU_SEC_SID_NS;
- tlbe->entry.target_as = smmu_get_address_space(tlbe->sec_sid);
+ tlbe->entry.target_as = smmu_get_address_space(bs, tlbe->sec_sid);
if (!tlbe->entry.target_as) {
info->type = SMMU_PTW_ERR_WALK_EABT;
info->addr = gpa;
@@ -720,6 +720,7 @@ static int AArch64_S2StartLevel(int sl0 , int granule_sz)
/**
* smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa
* for stage-2.
+ * @bs: SMMU base state
* @cfg: translation config
* @ipa: ipa to translate
* @perm: access type
@@ -731,7 +732,7 @@ static int AArch64_S2StartLevel(int sl0 , int granule_sz)
* Upon success, @tlbe is filled with translated_addr and entry
* permission rights.
*/
-static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
+static int smmu_ptw_64_s2(SMMUState *bs, SMMUTransCfg *cfg,
dma_addr_t ipa, IOMMUAccessFlags perm,
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
@@ -834,7 +835,7 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
uint8_t s2ap;
/* Use NS as Secure Stage 2 is not implemented (SMMU_S_IDR1.SEL2 == 0)*/
- if (get_pte(baseaddr, offset, &pte, info, SMMU_SEC_SID_NS)) {
+ if (get_pte(bs, baseaddr, offset, &pte, info, SMMU_SEC_SID_NS)) {
goto error;
}
trace_smmu_ptw_level(stage, level, ipa, subpage_size,
@@ -888,7 +889,7 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
}
tlbe->sec_sid = SMMU_SEC_SID_NS;
- tlbe->entry.target_as = &address_space_memory;
+ tlbe->entry.target_as = &bs->as_memory;
tlbe->entry.translated_addr = gpa;
tlbe->entry.iova = ipa & ~mask;
tlbe->entry.addr_mask = mask;
@@ -964,7 +965,7 @@ int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
return -EINVAL;
}
- return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
+ return smmu_ptw_64_s2(bs, cfg, iova, perm, tlbe, info);
}
/* SMMU_NESTED. */
@@ -985,7 +986,7 @@ int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
}
ipa = CACHED_ENTRY_TO_ADDR(tlbe, iova);
- ret = smmu_ptw_64_s2(cfg, ipa, perm, &tlbe_s2, info);
+ ret = smmu_ptw_64_s2(bs, cfg, ipa, perm, &tlbe_s2, info);
if (ret) {
return ret;
}
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 885dae6f50e..a4a03c064d5 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -110,13 +110,14 @@ static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn,
trace_smmuv3_write_gerrorn(toggled & pending, bank->gerrorn);
}
-static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd, SMMUSecSID sec_sid)
+static inline MemTxResult queue_read(SMMUState *bs, SMMUQueue *q,
+ Cmd *cmd, SMMUSecSID sec_sid)
{
dma_addr_t addr = Q_CONS_ENTRY(q);
MemTxResult ret;
int i;
- ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
+ ret = dma_memory_read(&bs->as_memory, addr, cmd, sizeof(Cmd),
MEMTXATTRS_UNSPECIFIED);
if (ret != MEMTX_OK) {
return ret;
@@ -127,7 +128,7 @@ static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd, SMMUSecSID sec_sid)
return ret;
}
-static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
+static MemTxResult queue_write(SMMUState *bs, SMMUQueue *q, Evt *evt_in)
{
dma_addr_t addr = Q_PROD_ENTRY(q);
MemTxResult ret;
@@ -137,7 +138,7 @@ static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
cpu_to_le32s(&evt.word[i]);
}
- ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
+ ret = dma_memory_write(&bs->as_memory, addr, &evt, sizeof(Evt),
MEMTXATTRS_UNSPECIFIED);
if (ret != MEMTX_OK) {
return ret;
@@ -162,7 +163,7 @@ static MemTxResult smmuv3_write_eventq(SMMUv3State *s, SMMUSecSID sec_sid,
return MEMTX_ERROR;
}
- r = queue_write(q, evt);
+ r = queue_write(&s->smmu_state, q, evt);
if (r != MEMTX_OK) {
return r;
}
@@ -993,7 +994,7 @@ static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event,
cfg = g_new0(SMMUTransCfg, 1);
cfg->sec_sid = sec_sid;
cfg->txattrs = smmu_get_txattrs(sec_sid);
- cfg->as = smmu_get_address_space(sec_sid);
+ cfg->as = smmu_get_address_space(bc, sec_sid);
if (!cfg->as) {
/* Can't get AddressSpace, free cfg and return. */
g_free(cfg);
@@ -1221,7 +1222,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
SMMUTranslationStatus status;
SMMUTransCfg *cfg = NULL;
IOMMUTLBEntry entry = {
- .target_as = &address_space_memory,
+ .target_as = &s->smmu_state.as_memory,
.iova = addr,
.translated_addr = addr,
.addr_mask = ~(hwaddr)0,
@@ -1322,6 +1323,8 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
SMMUSecSID sec_sid)
{
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+ SMMUv3State *s3 = sdev->smmu;
+ SMMUState *bs = &(s3->smmu_state);
SMMUEventInfo eventinfo = {.sec_sid = sec_sid,
.inval_ste_allowed = true};
SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo, sec_sid);
@@ -1369,7 +1372,7 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
}
event.type = IOMMU_NOTIFIER_UNMAP;
- event.entry.target_as = smmu_get_address_space(sec_sid);
+ event.entry.target_as = smmu_get_address_space(bs, sec_sid);
event.entry.iova = iova;
event.entry.addr_mask = num_pages * (1 << granule) - 1;
event.entry.perm = IOMMU_NONE;
@@ -1618,7 +1621,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid)
break;
}
- if (queue_read(q, &cmd, sec_sid) != MEMTX_OK) {
+ if (queue_read(&s->smmu_state, q, &cmd, sec_sid) != MEMTX_OK) {
cmd_error = SMMU_CERROR_ABT;
break;
}
@@ -1649,7 +1652,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid)
SMMUTransCfg *cfg = g_new0(SMMUTransCfg, 1);
cfg->sec_sid = sec_sid;
cfg->txattrs = smmu_get_txattrs(sec_sid);
- cfg->as = smmu_get_address_space(sec_sid);
+ cfg->as = smmu_get_address_space(bs, sec_sid);
if (!cfg->as) {
g_free(cfg);
qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 Can't get address space\n");
--
2.47.3