Allocate and set the per-device iommu private data during iommu device
probe. Add a flag to indicate whether default domain attachment is
deferred. With this refactoring, the dummy DEFER_DEVICE_DOMAIN_INFO
pointer is removed.

Signed-off-by: Lu Baolu <[email protected]>
---
 drivers/iommu/intel/iommu.c | 122 +++++++++++++++---------------------
 1 file changed, 51 insertions(+), 71 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 185aa38df602..165c890b8304 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -309,19 +309,9 @@ static int iommu_skip_te_disable;
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
-#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
 struct device_domain_info *get_domain_info(struct device *dev)
 {
-       struct device_domain_info *info;
-
-       if (!dev)
-               return NULL;
-
-       info = dev_iommu_priv_get(dev);
-       if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
-               return NULL;
-
-       return info;
+       return dev_iommu_priv_get(dev);
 }
 
 DEFINE_SPINLOCK(device_domain_lock);
@@ -708,11 +698,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
        return &context[devfn];
 }
 
-static bool attach_deferred(struct device *dev)
-{
-       return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
-}
-
 /**
  * is_downstream_to_pci_bridge - test if a device belongs to the PCI
  *                              sub-hierarchy of a candidate PCI-PCI bridge
@@ -2426,9 +2411,6 @@ struct dmar_domain *find_domain(struct device *dev)
        if (unlikely(!dev || !dev->iommu))
                return NULL;
 
-       if (unlikely(attach_deferred(dev)))
-               return NULL;
-
        /* No lock here, assumes no domain exit in normal case */
        info = get_domain_info(dev);
        if (likely(info))
@@ -2497,66 +2479,20 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                                                    struct device *dev,
                                                    struct dmar_domain *domain)
 {
-       struct device_domain_info *info;
+       struct device_domain_info *info = get_domain_info(dev);
        unsigned long flags;
        int ret;
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return NULL;
-
-       if (!dev_is_real_dma_subdevice(dev)) {
-               info->bus = bus;
-               info->devfn = devfn;
-               info->segment = iommu->segment;
-       } else {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               info->bus = pdev->bus->number;
-               info->devfn = pdev->devfn;
-               info->segment = pci_domain_nr(pdev->bus);
-       }
-
-       info->dev = dev;
-       info->domain = domain;
-       info->iommu = iommu;
-
-       if (dev && dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(info->dev);
-
-               if (ecap_dev_iotlb_support(iommu->ecap) &&
-                   pci_ats_supported(pdev) &&
-                   dmar_find_matched_atsr_unit(pdev))
-                       info->ats_supported = 1;
-
-               if (sm_supported(iommu)) {
-                       if (pasid_supported(iommu)) {
-                               int features = pci_pasid_features(pdev);
-                               if (features >= 0)
-                                       info->pasid_supported = features | 1;
-                       }
-
-                       if (info->ats_supported && ecap_prs(iommu->ecap) &&
-                           pci_pri_supported(pdev))
-                               info->pri_supported = 1;
-               }
-       }
-
        spin_lock_irqsave(&device_domain_lock, flags);
+       info->domain = domain;
        spin_lock(&iommu->lock);
        ret = domain_attach_iommu(domain, iommu);
        spin_unlock(&iommu->lock);
-
        if (ret) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
-               kfree(info);
                return NULL;
        }
-
        list_add(&info->link, &domain->devices);
-       list_add(&info->global, &device_domain_list);
-       if (dev)
-               dev_iommu_priv_set(dev, info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
        /* PASID table is mandatory for a PCI device in scalable mode. */
@@ -4405,14 +4341,56 @@ static bool intel_iommu_capable(enum iommu_cap cap)
 
 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 {
+       struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
+       struct device_domain_info *info;
        struct intel_iommu *iommu;
+       unsigned long flags;
+       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, NULL, NULL);
+       iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return ERR_PTR(-ENODEV);
 
-       if (translation_pre_enabled(iommu))
-               dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       if (dev_is_real_dma_subdevice(dev)) {
+               info->bus = pdev->bus->number;
+               info->devfn = pdev->devfn;
+               info->segment = pci_domain_nr(pdev->bus);
+       } else {
+               info->bus = bus;
+               info->devfn = devfn;
+               info->segment = iommu->segment;
+       }
+
+       info->dev = dev;
+       info->iommu = iommu;
+       if (dev_is_pci(dev)) {
+               if (ecap_dev_iotlb_support(iommu->ecap) &&
+                   pci_ats_supported(pdev) &&
+                   dmar_find_matched_atsr_unit(pdev))
+                       info->ats_supported = 1;
+
+               if (sm_supported(iommu)) {
+                       if (pasid_supported(iommu)) {
+                               int features = pci_pasid_features(pdev);
+
+                               if (features >= 0)
+                                       info->pasid_supported = features | 1;
+                       }
+
+                       if (info->ats_supported && ecap_prs(iommu->ecap) &&
+                           pci_pri_supported(pdev))
+                               info->pri_supported = 1;
+               }
+       }
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_add(&info->global, &device_domain_list);
+       dev_iommu_priv_set(dev, info);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return &iommu->iommu;
 }
@@ -4635,7 +4613,9 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 
 static bool intel_iommu_is_attach_deferred(struct device *dev)
 {
-       return attach_deferred(dev);
+       struct device_domain_info *info = get_domain_info(dev);
+
+       return translation_pre_enabled(info->iommu) && !info->domain;
 }
 
 /*
-- 
2.25.1

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to