Now that we use the TLB operations from the iommu framework, the TLB
operations for v7s can be removed.

Correspondingly, switch the parameter "cookie" to the internal structure.

Signed-off-by: Yong Wu <[email protected]>
---
 drivers/iommu/mtk_iommu.c | 27 ++++-----------------------
 1 file changed, 4 insertions(+), 23 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 89cec51405cd..5656819cd4a1 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -206,10 +206,8 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
        return container_of(dom, struct mtk_iommu_domain, domain);
 }
 
-static void mtk_iommu_tlb_flush_all(void *cookie)
+static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
-       struct mtk_iommu_data *data = cookie;
-
        for_each_m4u(data) {
                if (!pm_runtime_active(data->dev))
                        continue;
@@ -221,9 +219,9 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
 }
 
 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
-                                          size_t granule, void *cookie)
+                                          size_t granule,
+                                          struct mtk_iommu_data *data)
 {
-       struct mtk_iommu_data *data = cookie;
        unsigned long flags;
        int ret;
        u32 tmp;
@@ -250,7 +248,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
-                       mtk_iommu_tlb_flush_all(cookie);
+                       mtk_iommu_tlb_flush_all(data);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
@@ -258,22 +256,6 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
        }
 }
 
-static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
-                                           unsigned long iova, size_t granule,
-                                           void *cookie)
-{
-       struct mtk_iommu_data *data = cookie;
-       struct iommu_domain *domain = &data->m4u_dom->domain;
-
-       iommu_iotlb_gather_add_page(domain, gather, iova, granule);
-}
-
-static const struct iommu_flush_ops mtk_iommu_flush_ops = {
-       .tlb_flush_all = mtk_iommu_tlb_flush_all,
-       .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
-       .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
-};
-
 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
 {
        struct mtk_iommu_data *data = dev_id;
@@ -380,7 +362,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
                .oas = 35,
-               .tlb = &mtk_iommu_flush_ops,
                .iommu_dev = data->dev,
        };
 
-- 
2.18.0

Reply via email to