With the upcoming reservation/management scheme, early activation will
assign a special vector. The final activation at request_irq() assigns
a real vector, which then has to be written back into the interrupt
remapping table.

Split the IRTE reconfiguration code out of set_affinity() and use it
for reactivation as well.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Cc: Joerg Roedel <j...@8bytes.org>
Cc: iommu@lists.linux-foundation.org
---
 drivers/iommu/amd_iommu.c |   37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)
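
For reviewers, a minimal userspace sketch (not part of the patch; all
"fake_" names and values are made-up stand-ins, not kernel APIs) of the
two-phase activation flow this prepares for: early activation parks the
entry on a special vector, and the final activation at request_irq()
must write the real vector and destination back into the entry, which
is what calling amd_ir_update_irte() from irq_remapping_activate()
provides.

#include <stdio.h>

/* Stand-in for an IRTE; only the fields relevant here. */
struct fake_irte {
	unsigned int vector;
	unsigned int dest_apicid;
};

/* Vector parked by early activation; the value is made up. */
#define FAKE_RESERVED_VECTOR	0xef

/* Models iommu->irte_ops->set_affinity(): rewrite vector/destination. */
static void fake_update_irte(struct fake_irte *irte, unsigned int vector,
			     unsigned int dest_apicid)
{
	irte->vector = vector;
	irte->dest_apicid = dest_apicid;
}

int main(void)
{
	struct fake_irte irte = { 0, 0 };

	/* Early (reservation mode) activation: special vector only. */
	fake_update_irte(&irte, FAKE_RESERVED_VECTOR, 0);
	printf("early: vector=0x%x dest=%u\n", irte.vector, irte.dest_apicid);

	/*
	 * Final activation at request_irq(): without this second update
	 * the device would keep raising the parked vector.
	 */
	fake_update_irte(&irte, 0x41, 2);
	printf("final: vector=0x%x dest=%u\n", irte.vector, irte.dest_apicid);
	return 0;
}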

--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4170,16 +4170,25 @@ static void irq_remapping_free(struct ir
        irq_domain_free_irqs_common(domain, virq, nr_irqs);
 }
 
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+                              struct amd_ir_data *ir_data,
+                              struct irq_2_irte *irte_info,
+                              struct irq_cfg *cfg);
+
 static int irq_remapping_activate(struct irq_domain *domain,
                                  struct irq_data *irq_data, bool early)
 {
        struct amd_ir_data *data = irq_data->chip_data;
        struct irq_2_irte *irte_info = &data->irq_2_irte;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+       struct irq_cfg *cfg = irqd_cfg(irq_data);
 
-       if (iommu)
-               iommu->irte_ops->activate(data->entry, irte_info->devid,
-                                         irte_info->index);
+       if (!iommu)
+               return 0;
+
+       iommu->irte_ops->activate(data->entry, irte_info->devid,
+                                 irte_info->index);
+       amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
        return 0;
 }
 
@@ -4267,6 +4276,20 @@ static int amd_ir_set_vcpu_affinity(stru
        return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
 }
 
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+                              struct amd_ir_data *ir_data,
+                              struct irq_2_irte *irte_info,
+                              struct irq_cfg *cfg)
+{
+       /*
+        * Atomically updates the IRTE with the new destination, vector
+        * and flushes the interrupt entry cache.
+        */
+       iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+                                     irte_info->index, cfg->vector,
+                                     cfg->dest_apicid);
+}
+
 static int amd_ir_set_affinity(struct irq_data *data,
                               const struct cpumask *mask, bool force)
 {
@@ -4284,13 +4307,7 @@ static int amd_ir_set_affinity(struct ir
        if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
                return ret;
 
-       /*
-        * Atomically updates the IRTE with the new destination, vector
-        * and flushes the interrupt entry cache.
-        */
-       iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
-                           irte_info->index, cfg->vector, cfg->dest_apicid);
-
+       amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to cleanup the previous

