The device_domain_lock is used to protect the device tracking list of
a domain. Remove the unnecessary spin_lock/unlock() calls and move the
necessary ones so that they cover only the list accesses.
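
For illustration, the locking pattern after this change looks roughly
like the sketch below. This is a minimal sketch, not code taken from
the patch: the helper name walk_domain_devices() and its body are
hypothetical; only the placement of spin_lock_irqsave()/
spin_unlock_irqrestore() around the list walk reflects the change.

  /*
   * Illustrative sketch only (hypothetical helper): the lock is taken
   * locally around the list traversal instead of being asserted as
   * already held by the caller.
   */
  static void walk_domain_devices(struct dmar_domain *domain)
  {
          struct device_domain_info *info;
          unsigned long flags;

          spin_lock_irqsave(&device_domain_lock, flags);
          list_for_each_entry(info, &domain->devices, link) {
                  /* per-device work that only touches list members */
          }
          spin_unlock_irqrestore(&device_domain_lock, flags);
  }

This keeps each critical section limited to the list traversal and
removes the requirement that callers already hold device_domain_lock.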

Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
---
 drivers/iommu/intel/iommu.c | 68 +++++++++++++++----------------------
 1 file changed, 27 insertions(+), 41 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8345e0c0824c..aa3dea1c9f13 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -534,16 +534,10 @@ static int domain_update_device_node(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        int nid = NUMA_NO_NODE;
+       unsigned long flags;
 
-       assert_spin_locked(&device_domain_lock);
-
-       if (list_empty(&domain->devices))
-               return NUMA_NO_NODE;
-
+       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
-               if (!info->dev)
-                       continue;
-
                /*
                 * There could possibly be multiple device numa nodes as devices
                 * within the same domain may sit behind different IOMMUs. There
@@ -554,6 +548,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
                if (nid != NUMA_NO_NODE)
                        break;
        }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return nid;
 }
@@ -1376,49 +1371,50 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 }
 
 static struct device_domain_info *
-iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
-                        u8 bus, u8 devfn)
+iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
+                       u8 bus, u8 devfn)
 {
-       struct device_domain_info *info;
-
-       assert_spin_locked(&device_domain_lock);
+       struct device_domain_info *info = NULL, *tmp;
+       unsigned long flags;
 
        if (!iommu->qi)
                return NULL;
 
-       list_for_each_entry(info, &domain->devices, link)
-               if (info->iommu == iommu && info->bus == bus &&
-                   info->devfn == devfn) {
-                       if (info->ats_supported && info->dev)
-                               return info;
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_entry(tmp, &domain->devices, link) {
+               if (tmp->iommu == iommu && tmp->bus == bus &&
+                   tmp->devfn == devfn) {
+                       if (tmp->ats_supported)
+                               info = tmp;
                        break;
                }
+       }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
-       return NULL;
+       return info;
 }
 
 static void domain_update_iotlb(struct dmar_domain *domain)
 {
        struct device_domain_info *info;
        bool has_iotlb_device = false;
+       unsigned long flags;
 
-       assert_spin_locked(&device_domain_lock);
-
-       list_for_each_entry(info, &domain->devices, link)
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_entry(info, &domain->devices, link) {
                if (info->ats_enabled) {
                        has_iotlb_device = true;
                        break;
                }
-
+       }
        domain->has_iotlb_device = has_iotlb_device;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
        struct pci_dev *pdev;
 
-       assert_spin_locked(&device_domain_lock);
-
        if (!info || !dev_is_pci(info->dev))
                return;
 
@@ -1464,8 +1460,6 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
        struct pci_dev *pdev;
 
-       assert_spin_locked(&device_domain_lock);
-
        if (!dev_is_pci(info->dev))
                return;
 
@@ -1908,11 +1902,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct pasid_table *table,
                                      u8 bus, u8 devfn)
 {
+       struct device_domain_info *info =
+                       iommu_support_dev_iotlb(domain, iommu, bus, devfn);
        u16 did = domain->iommu_did[iommu->seq_id];
        int translation = CONTEXT_TT_MULTI_LEVEL;
-       struct device_domain_info *info = NULL;
        struct context_entry *context;
-       unsigned long flags;
        int ret;
 
        WARN_ON(did == 0);
@@ -1925,7 +1919,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
        BUG_ON(!domain->pgd);
 
-       spin_lock_irqsave(&device_domain_lock, flags);
        spin_lock(&iommu->lock);
 
        ret = -ENOMEM;
@@ -1978,7 +1971,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                 * Setup the Device-TLB enable bit and Page request
                 * Enable bit:
                 */
-               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                if (info && info->ats_supported)
                        context_set_sm_dte(context);
                if (info && info->pri_supported)
@@ -2001,7 +1993,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                        goto out_unlock;
                        }
 
-                       info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                        if (info && info->ats_supported)
                                translation = CONTEXT_TT_DEV_IOTLB;
                        else
@@ -2047,7 +2038,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 out_unlock:
        spin_unlock(&iommu->lock);
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return ret;
 }
@@ -2460,15 +2450,14 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
        if (!iommu)
                return -ENODEV;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
-       info->domain = domain;
        ret = domain_attach_iommu(domain, iommu);
-       if (ret) {
-               spin_unlock_irqrestore(&device_domain_lock, flags);
+       if (ret)
                return ret;
-       }
+
+       spin_lock_irqsave(&device_domain_lock, flags);
        list_add(&info->link, &domain->devices);
        spin_unlock_irqrestore(&device_domain_lock, flags);
+       info->domain = domain;
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
        if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4637,7 +4626,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct context_entry *context;
        struct dmar_domain *domain;
-       unsigned long flags;
        u64 ctx_lo;
        int ret;
 
@@ -4645,7 +4633,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
        if (!domain)
                return -EINVAL;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
        spin_lock(&iommu->lock);
 
        ret = -EINVAL;
@@ -4677,7 +4664,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 
  out:
        spin_unlock(&iommu->lock);
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return ret;
 }
-- 
2.25.1
