Re: [PATCH] iommu/iova: Separate out rcache init

2022-01-28 Thread Robin Murphy

On 2022-01-28 11:32, John Garry wrote:

On 26/01/2022 17:00, Robin Murphy wrote:

As above, I vote for just forward-declaring the free routine in iova.c
and keeping it entirely private.


BTW, speaking of forward declarations, it's possible to remove all the 
forward declarations in iova.c now that the FQ code is gone - but with a 
good bit of rearranging. However, I am not sure how much people care 
about that, or whether the resulting code layout is sane...
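
(For illustration, the "forward-declare and keep it entirely private"
shape would look something like the sketch below - the function name and
body are assumptions pieced together from the cleanup calls in this
patch, not the final code:)

	/* Near the top of iova.c, with the other forward declarations: */
	static void iova_domain_free_rcaches(struct iova_domain *iovad);

	/* Defined further down; 'static' keeps it private to iova.c. */
	static void iova_domain_free_rcaches(struct iova_domain *iovad)
	{
		cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
						    &iovad->cpuhp_dead);
		free_iova_rcaches(iovad);
		kfree(iovad->rcaches);
		iovad->rcaches = NULL;
	}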


Indeed, I was very tempted to raise the question there of whether there 
was any more cleanup or refactoring that could be done to justify 
collecting all the rcache code together at the top of iova.c. But in the 
end I didn't, so my opinion still remains a secret...


Robin.


Re: [PATCH] iommu/iova: Separate out rcache init

2022-01-26 Thread Robin Murphy

On 2022-01-26 13:55, John Garry wrote:

Currently the rcache structures are allocated for all IOVA domains, even if
they do not use the "fast" alloc+free interface. This is wasteful of memory.

In addition, failures in init_iova_rcaches() are not handled safely, which is
less than ideal.

Make "fast" users call a separate rcache init explicitly, which includes
error checking.

Signed-off-by: John Garry 


Mangled patch? (no "---" separator here)

Overall this looks great, just a few comments further down...
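
(For context, the calling convention this introduces for any "fast" user
is roughly the following - an illustrative sketch based on the commit
message, not taken from the patch itself:)

	init_iova_domain(iovad, granule, start_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;	/* allocation failures are now propagated */

	/* ... alloc_iova_fast() / free_iova_fast() as before ... */

	put_iova_domain(iovad);	/* frees the rcaches if they were set up */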


diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3a46f2cc9e5d..dd066d990809 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	unsigned long order, base_pfn;
 	struct iova_domain *iovad;
+	int ret;
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	}
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
+	ret = iova_domain_init_rcaches(iovad);
+	if (ret)
+		return ret;
 
 	/* If the FQ fails we can simply fall back to strict mode */
 	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b28c9435b898..d3adc6ea5710 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -15,13 +15,14 @@
 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR	~0UL
 
+#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
 			       unsigned long pfn,
 			       unsigned long size);
 static unsigned long iova_rcache_get(struct iova_domain *iovad,
 				     unsigned long size,
 				     unsigned long limit_pfn);
-static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 
@@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
-	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
-	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
@@ -497,9 +496,9 @@ void put_iova_domain(struct iova_domain *iovad)
 {
 	struct iova *iova, *tmp;
 
-	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
-					    &iovad->cpuhp_dead);
-	free_iova_rcaches(iovad);
+	if (iovad->rcaches)
+		iova_domain_free_rcaches(iovad);
+
 	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
 		free_iova_mem(iova);
 }
@@ -608,6 +607,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
  */
 
 #define IOVA_MAG_SIZE 128
+#define MAX_GLOBAL_MAGS 32	/* magazines per bin */
 
 struct iova_magazine {
 	unsigned long size;
@@ -620,6 +620,13 @@ struct iova_cpu_rcache {
 	struct iova_magazine *prev;
 };
 
+struct iova_rcache {
+	spinlock_t lock;
+	unsigned long depot_size;
+	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+	struct iova_cpu_rcache __percpu *cpu_rcaches;
+};
+
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
 	return kzalloc(sizeof(struct iova_magazine), flags);
@@ -693,28 +700,62 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
 	mag->pfns[mag->size++] = pfn;
 }
 
-static void init_iova_rcaches(struct iova_domain *iovad)
+int iova_domain_init_rcaches(struct iova_domain *iovad)
 {
-	struct iova_cpu_rcache *cpu_rcache;
-	struct iova_rcache *rcache;
 	unsigned int cpu;
-	int i;
+	int i, ret;
+
+	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
+				 sizeof(struct iova_rcache),
+				 GFP_KERNEL);
+	if (!iovad->rcaches)
+		return -ENOMEM;
 
 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		struct iova_cpu_rcache *cpu_rcache;
+		struct iova_rcache *rcache;
+
 		rcache = &iovad->rcaches[i];
 		spin_lock_init(&rcache->lock);
 		rcache->depot_size = 0;
-		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
-		if (WARN_ON(!rcache->cpu_rcaches))
-			continue;
+		rcache->cpu_rcaches =