-----Original Message-----
From: Joao Martins <joao.m.mart...@oracle.com>
Subject: Re: [PATCH 3/5] vfio/iommufd: Add
IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR flag support
On 10/09/2025 03:36, Zhenzhong Duan wrote:
Pass IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR when doing the last
dirty
bitmap query right before unmap, with no PTE flushes. This accelerates the
query without issue because unmap will tear down the mapping anyway.
Add a new element dirty_tracking_flags in VFIOIOMMUFDContainer to
be used for the flags of iommufd dirty tracking. Currently it is
set to either IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR or 0 based on
the scenario.
Co-developed-by: Joao Martins <joao.m.mart...@oracle.com>
Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.d...@intel.com>
Tested-by: Xudong Hao <xudong....@intel.com>
Tested-by: Giovanni Cabiddu <giovanni.cabi...@intel.com>
---
hw/vfio/vfio-iommufd.h | 1 +
include/system/iommufd.h | 2 +-
backends/iommufd.c | 5 +++--
hw/vfio/iommufd.c | 6 +++++-
backends/trace-events | 2 +-
5 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/hw/vfio/vfio-iommufd.h b/hw/vfio/vfio-iommufd.h
index 07ea0f4304..e0af241c75 100644
--- a/hw/vfio/vfio-iommufd.h
+++ b/hw/vfio/vfio-iommufd.h
@@ -26,6 +26,7 @@ typedef struct VFIOIOMMUFDContainer {
VFIOContainerBase bcontainer;
IOMMUFDBackend *be;
uint32_t ioas_id;
+ uint64_t dirty_tracking_flags;
QLIST_HEAD(, VFIOIOASHwpt) hwpt_list;
} VFIOIOMMUFDContainer;
diff --git a/include/system/iommufd.h b/include/system/iommufd.h
index c9c72ffc45..63898e7b0d 100644
--- a/include/system/iommufd.h
+++ b/include/system/iommufd.h
@@ -64,7 +64,7 @@ bool
iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be, uint32_t
hwpt_id,
bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be,
uint32_t hwpt_id,
uint64_t iova, ram_addr_t
size,
uint64_t page_size,
uint64_t *data,
- Error **errp);
+ uint64_t flags, Error
**errp);
bool iommufd_backend_invalidate_cache(IOMMUFDBackend *be,
uint32_t id,
uint32_t data_type,
uint32_t entry_len,
uint32_t *entry_num, void
*data,
diff --git a/backends/iommufd.c b/backends/iommufd.c
index 2a33c7ab0b..3c4f6157e2 100644
--- a/backends/iommufd.c
+++ b/backends/iommufd.c
@@ -361,7 +361,7 @@ bool
iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be,
uint32_t hwpt_id,
uint64_t iova, ram_addr_t
size,
uint64_t page_size,
uint64_t *data,
- Error **errp)
+ uint64_t flags, Error **errp)
{
int ret;
struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap = {
@@ -371,11 +371,12 @@ bool
iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be,
.length = size,
.page_size = page_size,
.data = (uintptr_t)data,
+ .flags = flags,
};
ret = ioctl(be->fd, IOMMU_HWPT_GET_DIRTY_BITMAP,
&get_dirty_bitmap);
trace_iommufd_backend_get_dirty_bitmap(be->fd, hwpt_id, iova,
size,
- page_size, ret ? errno :
0);
+ flags, page_size, ret ?
errno : 0);
if (ret) {
error_setg_errno(errp, errno,
"IOMMU_HWPT_GET_DIRTY_BITMAP
(iova: 0x%"HWADDR_PRIx
diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
index 0057488ce9..c897aa6b17 100644
--- a/hw/vfio/iommufd.c
+++ b/hw/vfio/iommufd.c
@@ -62,7 +62,7 @@ static int iommufd_cdev_unmap_one(const
VFIOContainerBase *bcontainer,
hwaddr iova, ram_addr_t size,
IOMMUTLBEntry *iotlb)
{
- const VFIOIOMMUFDContainer *container =
+ VFIOIOMMUFDContainer *container =
container_of(bcontainer, VFIOIOMMUFDContainer,
bcontainer);
bool need_dirty_sync = false;
Error *local_err = NULL;
@@ -73,9 +73,12 @@ static int iommufd_cdev_unmap_one(const
VFIOContainerBase *bcontainer,
if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) {
if
(!vfio_container_devices_dirty_tracking_is_supported(bcontainer) &&
bcontainer->dirty_pages_supported) {
+ container->dirty_tracking_flags =
+
IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR;
ret = vfio_container_query_dirty_bitmap(bcontainer, iova,
size,
iotlb->translated_addr,
&local_err);
+ container->dirty_tracking_flags = 0;
Why not change vfio_container_query_dirty_bitmap to pass a flags argument too,
like the
original patches did? It is a little unnecessarily odd style to pass a flag via
the container structure and then clear it afterwards.