Re: [PATCH] vfio/iommu_type1: Maintain a counter for non_pinned_groups

2021-02-02 Thread Alex Williamson
On Mon, 25 Jan 2021 10:46:42 +0800
Keqian Zhu  wrote:

> With this counter, we no longer need to traverse all groups to update
> the pinned-page dirty scope of the vfio_iommu.
> 
> Suggested-by: Alex Williamson 
> Signed-off-by: Keqian Zhu 
> ---
>  drivers/vfio/vfio_iommu_type1.c | 40 +
>  1 file changed, 5 insertions(+), 35 deletions(-)

Applied to vfio next branch for v5.12.  Thanks,

Alex

> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 0b4dedaa9128..bb4bbcc79101 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -73,7 +73,7 @@ struct vfio_iommu {
>   bool            v2;
>   bool            nesting;
>   bool            dirty_page_tracking;
> - bool            pinned_page_dirty_scope;
> + uint64_t        num_non_pinned_groups;
>  };
>  
>  struct vfio_domain {
> @@ -148,7 +148,6 @@ static int put_pfn(unsigned long pfn, int prot);
>  static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
>                                                        struct iommu_group *iommu_group);
>  
> -static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
>  /*
>   * This code handles mapping and unmapping of user data buffers
>   * into DMA'ble space using the IOMMU
> @@ -714,7 +713,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>   group = vfio_iommu_find_iommu_group(iommu, iommu_group);
>   if (!group->pinned_page_dirty_scope) {
>   group->pinned_page_dirty_scope = true;
> - update_pinned_page_dirty_scope(iommu);
> + iommu->num_non_pinned_groups--;
>   }
>  
>   goto pin_done;
> @@ -991,7 +990,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
>* mark all pages dirty if any IOMMU capable device is not able
>* to report dirty pages and all pages are pinned and mapped.
>*/
> - if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
> + if (iommu->num_non_pinned_groups && dma->iommu_mapped)
>   bitmap_set(dma->bitmap, 0, nbits);
>  
>   if (shift) {
> @@ -1622,33 +1621,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
>   return group;
>  }
>  
> -static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
> -{
> - struct vfio_domain *domain;
> - struct vfio_group *group;
> -
> - list_for_each_entry(domain, &iommu->domain_list, next) {
> - list_for_each_entry(group, &domain->group_list, next) {
> - if (!group->pinned_page_dirty_scope) {
> - iommu->pinned_page_dirty_scope = false;
> - return;
> - }
> - }
> - }
> -
> - if (iommu->external_domain) {
> - domain = iommu->external_domain;
> - list_for_each_entry(group, &domain->group_list, next) {
> - if (!group->pinned_page_dirty_scope) {
> - iommu->pinned_page_dirty_scope = false;
> - return;
> - }
> - }
> - }
> -
> - iommu->pinned_page_dirty_scope = true;
> -}
> -
>  static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
> phys_addr_t *base)
>  {
> @@ -2057,8 +2029,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>* addition of a dirty tracking group.
>*/
>   group->pinned_page_dirty_scope = true;
> - if (!iommu->pinned_page_dirty_scope)
> - update_pinned_page_dirty_scope(iommu);
>   mutex_unlock(&iommu->lock);
>  
>   return 0;
> @@ -2188,7 +2158,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>* demotes the iommu scope until it declares itself dirty tracking
>* capable via the page pinning interface.
>*/
> - iommu->pinned_page_dirty_scope = false;
> + iommu->num_non_pinned_groups++;
>   mutex_unlock(&iommu->lock);
>   vfio_iommu_resv_free(&group_resv_regions);
>  
> @@ -2416,7 +2386,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
>* to be promoted.
>*/
>   if (update_dirty_scope)
> - update_pinned_page_dirty_scope(iommu);
> + iommu->num_non_pinned_groups--;
>   mutex_unlock(&iommu->lock);
>  }
>  



[PATCH] vfio/iommu_type1: Maintain a counter for non_pinned_groups

2021-01-24 Thread Keqian Zhu
With this counter, we no longer need to traverse all groups to update
the pinned-page dirty scope of the vfio_iommu.

Suggested-by: Alex Williamson 
Signed-off-by: Keqian Zhu 
---
 drivers/vfio/vfio_iommu_type1.c | 40 +
 1 file changed, 5 insertions(+), 35 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0b4dedaa9128..bb4bbcc79101 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -73,7 +73,7 @@ struct vfio_iommu {
    bool        v2;
    bool        nesting;
    bool        dirty_page_tracking;
-   bool        pinned_page_dirty_scope;
+   uint64_t    num_non_pinned_groups;
 };
 
 struct vfio_domain {
@@ -148,7 +148,6 @@ static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
   struct iommu_group *iommu_group);
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -714,7 +713,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
group = vfio_iommu_find_iommu_group(iommu, iommu_group);
if (!group->pinned_page_dirty_scope) {
group->pinned_page_dirty_scope = true;
-   update_pinned_page_dirty_scope(iommu);
+   iommu->num_non_pinned_groups--;
}
 
goto pin_done;
@@ -991,7 +990,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 * mark all pages dirty if any IOMMU capable device is not able
 * to report dirty pages and all pages are pinned and mapped.
 */
-   if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+   if (iommu->num_non_pinned_groups && dma->iommu_mapped)
bitmap_set(dma->bitmap, 0, nbits);
 
if (shift) {
@@ -1622,33 +1621,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
 }
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
-{
-   struct vfio_domain *domain;
-   struct vfio_group *group;
-
-   list_for_each_entry(domain, &iommu->domain_list, next) {
-   list_for_each_entry(group, &domain->group_list, next) {
-   if (!group->pinned_page_dirty_scope) {
-   iommu->pinned_page_dirty_scope = false;
-   return;
-   }
-   }
-   }
-
-   if (iommu->external_domain) {
-   domain = iommu->external_domain;
-   list_for_each_entry(group, &domain->group_list, next) {
-   if (!group->pinned_page_dirty_scope) {
-   iommu->pinned_page_dirty_scope = false;
-   return;
-   }
-   }
-   }
-
-   iommu->pinned_page_dirty_scope = true;
-}
-
 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
  phys_addr_t *base)
 {
@@ -2057,8 +2029,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 * addition of a dirty tracking group.
 */
group->pinned_page_dirty_scope = true;
-   if (!iommu->pinned_page_dirty_scope)
-   update_pinned_page_dirty_scope(iommu);
    mutex_unlock(&iommu->lock);
 
return 0;
@@ -2188,7 +2158,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 * demotes the iommu scope until it declares itself dirty tracking
 * capable via the page pinning interface.
 */
-   iommu->pinned_page_dirty_scope = false;
+   iommu->num_non_pinned_groups++;
    mutex_unlock(&iommu->lock);
    vfio_iommu_resv_free(&group_resv_regions);
 
@@ -2416,7 +2386,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 * to be promoted.
 */
if (update_dirty_scope)
-   update_pinned_page_dirty_scope(iommu);
+   iommu->num_non_pinned_groups--;
    mutex_unlock(&iommu->lock);
 }
 
-- 
2.19.1
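
For illustration only, below is a minimal standalone C sketch of the counting
scheme this patch adopts (simplified stand-in types and hypothetical helper
names, not the kernel code itself): every attached group that has not limited
its dirty scope to pinned pages adds one to the counter, so "all groups have
pinned-page dirty scope" reduces to a compare-with-zero instead of a walk over
every domain's group list.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the vfio structures (no locking, no lists). */
struct sketch_iommu {
	/* Groups attached but not yet limited to pinned-page dirty scope. */
	uint64_t num_non_pinned_groups;
};

struct sketch_group {
	bool pinned_page_dirty_scope;
};

/* A newly attached group starts with full (non-pinned) dirty scope. */
static void sketch_attach_group(struct sketch_iommu *iommu,
				struct sketch_group *group)
{
	group->pinned_page_dirty_scope = false;
	iommu->num_non_pinned_groups++;
}

/* The first pin_pages call from a group promotes it to pinned scope. */
static void sketch_pin_pages(struct sketch_iommu *iommu,
			     struct sketch_group *group)
{
	if (!group->pinned_page_dirty_scope) {
		group->pinned_page_dirty_scope = true;
		iommu->num_non_pinned_groups--;
	}
}

/* Detaching a group that never pinned drops its contribution. */
static void sketch_detach_group(struct sketch_iommu *iommu,
				struct sketch_group *group)
{
	if (!group->pinned_page_dirty_scope)
		iommu->num_non_pinned_groups--;
}

/* Dirty reporting may trust pinned pages only when the counter is zero. */
static bool sketch_pinned_scope(const struct sketch_iommu *iommu)
{
	return iommu->num_non_pinned_groups == 0;
}

int main(void)
{
	struct sketch_iommu iommu = { 0 };
	struct sketch_group g1 = { false }, g2 = { false };

	sketch_attach_group(&iommu, &g1);
	sketch_attach_group(&iommu, &g2);
	printf("pinned scope after attach: %d\n", sketch_pinned_scope(&iommu));

	sketch_pin_pages(&iommu, &g1);
	sketch_pin_pages(&iommu, &g2);
	printf("pinned scope after pinning: %d\n", sketch_pinned_scope(&iommu));

	sketch_detach_group(&iommu, &g1);
	return 0;
}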