On 10/26/23 12:30, Zhenzhong Duan wrote:
From: Eric Auger <eric.auger@redhat.com>

No functional change intended.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>

Please drop my Signed-off-by (S-o-b) tag above — I am reviewing, not authoring, this patch.


Reviewed-by: Cédric Le Goater <clg@redhat.com>

Thanks,

C.


---
  include/hw/vfio/vfio-common.h         |  2 --
  include/hw/vfio/vfio-container-base.h |  2 ++
  hw/vfio/container.c                   | 11 ++++++-----
  3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 56452018a9..423ab2436c 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -80,8 +80,6 @@ typedef struct VFIOContainer {
      int fd; /* /dev/vfio/vfio, empowered by the attached groups */
      MemoryListener prereg_listener;
      unsigned iommu_type;
-    uint64_t dirty_pgsizes;
-    uint64_t max_dirty_bitmap_size;
      QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
      QLIST_HEAD(, VFIOGroup) group_list;
      GList *iova_ranges;
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index 89642e6b45..526d23acfd 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -53,6 +53,8 @@ typedef struct VFIOContainerBase {
      MemoryListener listener;
      Error *error;
      bool initialized;
+    uint64_t dirty_pgsizes;
+    uint64_t max_dirty_bitmap_size;
      unsigned long pgsizes;
      unsigned int dma_max_mappings;
      bool dirty_pages_supported;
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index b8f36f56d2..68dc6d240f 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -64,6 +64,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
                                   hwaddr iova, ram_addr_t size,
                                   IOMMUTLBEntry *iotlb)
  {
+    VFIOContainerBase *bcontainer = &container->bcontainer;
      struct vfio_iommu_type1_dma_unmap *unmap;
      struct vfio_bitmap *bitmap;
      VFIOBitmap vbmap;
@@ -91,7 +92,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
      bitmap->size = vbmap.size;
      bitmap->data = (__u64 *)vbmap.bitmap;
-    if (vbmap.size > container->max_dirty_bitmap_size) {
+    if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
          error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
          ret = -E2BIG;
          goto unmap_exit;
@@ -131,7 +132,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
     if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
          if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
-            container->bcontainer.dirty_pages_supported) {
+            bcontainer->dirty_pages_supported) {
              return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
          }
@@ -154,7 +155,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
          if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
              container->iommu_type == VFIO_TYPE1v2_IOMMU) {
              trace_vfio_legacy_dma_unmap_overflow_workaround();
-            unmap.size -= 1ULL << ctz64(container->bcontainer.pgsizes);
+            unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
              continue;
          }
          error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
@@ -469,8 +470,8 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
       */
      if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
          bcontainer->dirty_pages_supported = true;
-        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
-        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
+        bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
+        bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
      }
  }


Reply via email to