Squash and simplify some of the freeing code, and move the init
and free routines down into the rest of the flush queue code to
obviate the forward declarations.

Reviewed-by: John Garry <john.ga...@huawei.com>
Signed-off-by: Robin Murphy <robin.mur...@arm.com>
---
 drivers/iommu/iova.c | 131 +++++++++++++++++++------------------------
 1 file changed, 58 insertions(+), 73 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index a0250cebcdcf..c19f9a749070 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,8 +24,6 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
-static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(struct timer_list *t);
 
 static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
 {
@@ -73,60 +71,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
-static bool has_iova_flush_queue(struct iova_domain *iovad)
-{
-       return !!iovad->fq;
-}
-
-static void free_iova_flush_queue(struct iova_domain *iovad)
-{
-       if (!has_iova_flush_queue(iovad))
-               return;
-
-       del_timer_sync(&iovad->fq_timer);
-
-       fq_destroy_all_entries(iovad);
-
-       free_percpu(iovad->fq);
-
-       iovad->fq         = NULL;
-       iovad->fq_domain  = NULL;
-}
-
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
-{
-       struct iova_fq __percpu *queue;
-       int i, cpu;
-
-       atomic64_set(&iovad->fq_flush_start_cnt,  0);
-       atomic64_set(&iovad->fq_flush_finish_cnt, 0);
-
-       queue = alloc_percpu(struct iova_fq);
-       if (!queue)
-               return -ENOMEM;
-
-       for_each_possible_cpu(cpu) {
-               struct iova_fq *fq;
-
-               fq = per_cpu_ptr(queue, cpu);
-               fq->head = 0;
-               fq->tail = 0;
-
-               spin_lock_init(&fq->lock);
-
-               for (i = 0; i < IOVA_FQ_SIZE; i++)
-                       INIT_LIST_HEAD(&fq->entries[i].freelist);
-       }
-
-       iovad->fq_domain = fq_domain;
-       iovad->fq = queue;
-
-       timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
-       atomic_set(&iovad->fq_timer_on, 0);
-
-       return 0;
-}
-
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
 {
@@ -585,23 +529,6 @@ static void iova_domain_flush(struct iova_domain *iovad)
        atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
-static void fq_destroy_all_entries(struct iova_domain *iovad)
-{
-       int cpu;
-
-       /*
-        * This code runs when the iova_domain is being detroyed, so don't
-        * bother to free iovas, just free any remaining pagetable pages.
-        */
-       for_each_possible_cpu(cpu) {
-               struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
-               int idx;
-
-               fq_ring_for_each(idx, fq)
-                       put_pages_list(&fq->entries[idx].freelist);
-       }
-}
-
 static void fq_flush_timeout(struct timer_list *t)
 {
        struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
@@ -669,6 +596,64 @@ void queue_iova(struct iova_domain *iovad,
                          jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
 
+static void free_iova_flush_queue(struct iova_domain *iovad)
+{
+       int cpu, idx;
+
+       if (!iovad->fq)
+               return;
+
+       del_timer_sync(&iovad->fq_timer);
+       /*
+        * This code runs when the iova_domain is being destroyed, so don't
+        * bother to free iovas, just free any remaining pagetable pages.
+        */
+       for_each_possible_cpu(cpu) {
+               struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
+
+               fq_ring_for_each(idx, fq)
+                       put_pages_list(&fq->entries[idx].freelist);
+       }
+
+       free_percpu(iovad->fq);
+
+       iovad->fq = NULL;
+       iovad->fq_domain = NULL;
+}
+
+int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
+{
+       struct iova_fq __percpu *queue;
+       int i, cpu;
+
+       atomic64_set(&iovad->fq_flush_start_cnt,  0);
+       atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
+       queue = alloc_percpu(struct iova_fq);
+       if (!queue)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               struct iova_fq *fq = per_cpu_ptr(queue, cpu);
+
+               fq->head = 0;
+               fq->tail = 0;
+
+               spin_lock_init(&fq->lock);
+
+               for (i = 0; i < IOVA_FQ_SIZE; i++)
+                       INIT_LIST_HEAD(&fq->entries[i].freelist);
+       }
+
+       iovad->fq_domain = fq_domain;
+       iovad->fq = queue;
+
+       timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
+       atomic_set(&iovad->fq_timer_on, 0);
+
+       return 0;
+}
+
 /**
  * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
-- 
2.28.0.dirty
