The ACPI IVRS table can contain multiple IVHD blocks. Each block contains
the information used to initialize one IOMMU instance.

Currently, init_iommu_all() processes the IVHD blocks sequentially and
initializes the corresponding IOMMU instances one by one. However, certain
features require all IOMMUs to be configured the same way system-wide. If
some IVHD blocks contain inconsistent information (most likely due to
firmware bugs), the driver has to go back and try to revert settings on
the IOMMUs that have already been configured.

A solution is to split IOMMU initialization into three phases, as sketched
below:

Phase 1: Process the IVRS table information for all IOMMU instances. This
allows all IVHDs to be processed before any features are enabled.

Phase 2: Early feature-support check on all IOMMUs (using the information
in the IVHD blocks).

Phase 3: Iterate through all IOMMU instances and enable the features.
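
In outline, the restructured body of init_iommu_all() looks like the
following sketch (a simplified fragment of the diff below; IVHD bounds
checks, allocation-failure handling, and most error paths are elided, and
Phase 2 is only a placeholder comment in this patch):

  /* Phase 1: parse every IVHD block before enabling anything */
  while (p < end) {
          h = (struct ivhd_header *)p;
          if (*p == amd_iommu_target_ivhd_type) {
                  /* allocate the per-IOMMU struct, then parse this IVHD */
                  iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                  ret = init_iommu_one(iommu, h, table);
                  if (ret)
                          return ret;
          }
          p += h->length;
  }

  /* Phase 2: early, system-wide feature support checks (placeholder) */

  /* Phase 3: per-IOMMU feature enablement */
  for_each_iommu(iommu) {
          ret = init_iommu_one_late(iommu);
          if (ret)
                  return ret;
  }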

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
---
 drivers/iommu/amd/init.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index b3e4551ce9dd..5f86e357dbaa 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1692,7 +1692,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
                                 struct acpi_table_header *ivrs_base)
 {
        struct amd_iommu_pci_seg *pci_seg;
-       int ret;
 
        pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
        if (pci_seg == NULL)
@@ -1773,6 +1772,13 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
        if (!iommu->mmio_base)
                return -ENOMEM;
 
+       return init_iommu_from_acpi(iommu, h);
+}
+
+static int __init init_iommu_one_late(struct amd_iommu *iommu)
+{
+       int ret;
+
        if (alloc_cwwb_sem(iommu))
                return -ENOMEM;
 
@@ -1794,10 +1800,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
        if (amd_iommu_pre_enabled)
                amd_iommu_pre_enabled = translation_pre_enabled(iommu);
 
-       ret = init_iommu_from_acpi(iommu, h);
-       if (ret)
-               return ret;
-
        if (amd_iommu_irq_remap) {
                ret = amd_iommu_create_irq_domain(iommu);
                if (ret)
@@ -1808,7 +1810,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
         * Make sure IOMMU is not considered to translate itself. The IVRS
         * table tells us so, but this is a lie!
         */
-       pci_seg->rlookup_table[iommu->devid] = NULL;
+       iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
 
        return 0;
 }
@@ -1853,6 +1855,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
        end += table->length;
        p += IVRS_HEADER_LENGTH;
 
+       /* Phase 1: Process all IVHD blocks */
        while (p < end) {
                h = (struct ivhd_header *)p;
                if (*p == amd_iommu_target_ivhd_type) {
@@ -1878,6 +1881,15 @@ static int __init init_iommu_all(struct acpi_table_header *table)
        }
        WARN_ON(p != end);
 
+       /* Phase 2 : Early feature support check */
+
+       /* Phase 3 : Enabling IOMMU features */
+       for_each_iommu(iommu) {
+               ret = init_iommu_one_late(iommu);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
-- 
2.32.0
