Move the is_kdump_kernel() check out of iommu_dma_deferred_attach() and
into iommu_dma_init(), and use a static key in the mapping fast paths to
minimize the overhead in the normal (non-kdump) case.

Signed-off-by: Lianbo Jiang <liji...@redhat.com>
Co-developed-by: Robin Murphy <robin.mur...@arm.com>
Signed-off-by: Robin Murphy <robin.mur...@arm.com>
---
 drivers/iommu/dma-iommu.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f0305e6aac1b..3711b4a6e4f9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -51,6 +51,8 @@ struct iommu_dma_cookie {
        struct iommu_domain             *fq_domain;
 };
 
+static DEFINE_STATIC_KEY_FALSE(__deferred_attach);
+
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
                struct iommu_domain *domain)
 {
@@ -383,9 +385,6 @@ static int iommu_dma_deferred_attach(struct device *dev,
 {
        const struct iommu_ops *ops = domain->ops;
 
-       if (!is_kdump_kernel())
-               return 0;
-
        if (unlikely(ops->is_attach_deferred &&
                        ops->is_attach_deferred(domain, dev)))
                return iommu_attach_device(domain, dev);
@@ -535,7 +534,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;
 
-       if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+       if (static_branch_unlikely(&__deferred_attach) &&
+           iommu_dma_deferred_attach(dev, domain))
                return DMA_MAPPING_ERROR;
 
        size = iova_align(iovad, size + iova_off);
@@ -693,7 +693,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
        *dma_handle = DMA_MAPPING_ERROR;
 
-       if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+       if (static_branch_unlikely(&__deferred_attach) &&
+           iommu_dma_deferred_attach(dev, domain))
                return NULL;
 
        min_size = alloc_sizes & -alloc_sizes;
@@ -1003,7 +1004,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;
 
-       if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+       if (static_branch_unlikely(&__deferred_attach) &&
+           iommu_dma_deferred_attach(dev, domain))
                return 0;
 
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1451,6 +1453,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 
 static int iommu_dma_init(void)
 {
+       if (is_kdump_kernel())
+               static_branch_enable(&__deferred_attach);
+
        return iova_cache_get();
 }
 arch_initcall(iommu_dma_init);
-- 
2.17.1
