This reverts commit 2cffad7bad83157f89332872015f4305d2ac09ac.

Lost interrupts upon resume, characterised by
 <0>[ 1806.220332] do_IRQ: 4.33 No irq handler for vector
---
 arch/x86/include/asm/apic.h   |  1 -
 arch/x86/include/asm/irq.h    |  4 ++
 arch/x86/kernel/apic/vector.c | 32 +-------------
 arch/x86/kernel/irq.c         | 99 +++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/smpboot.c     |  2 +-
 5 files changed, 105 insertions(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a9e57f08bfa6..7a8651921ed5 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -386,7 +386,6 @@ extern struct apic *__apicdrivers[], *__apicdrivers_end[];
  */
 #ifdef CONFIG_SMP
 extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
-extern int lapic_can_unplug_cpu(void);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 2395bb794c7b..d8632f8fa17d 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -26,7 +26,11 @@ extern void irq_ctx_init(int cpu);
 
 struct irq_desc;
 
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpumask.h>
+extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
+#endif
 
 #ifdef CONFIG_HAVE_KVM
 extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6a823a25eaff..b9f96e506724 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -970,37 +970,7 @@ void irq_force_complete_move(struct irq_desc *desc)
 unlock:
        raw_spin_unlock(&vector_lock);
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Note, this is not accurate accounting, but at least good enough to
- * prevent that the actual interrupt move will run out of vectors.
- */
-int lapic_can_unplug_cpu(void)
-{
-       unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
-       int ret = 0;
-
-       raw_spin_lock(&vector_lock);
-       tomove = irq_matrix_allocated(vector_matrix);
-       avl = irq_matrix_available(vector_matrix, true);
-       if (avl < tomove) {
-               pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
-                       cpu, tomove, avl);
-               ret = -ENOSPC;
-               goto out;
-       }
-       rsvd = irq_matrix_reserved(vector_matrix);
-       if (avl < rsvd) {
-               pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
-                       rsvd, avl);
-       }
-out:
-       raw_spin_unlock(&vector_lock);
-       return ret;
-}
-#endif /* HOTPLUG_CPU */
-#endif /* SMP */
+#endif
 
 static void __init print_APIC_field(int base)
 {
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 49cfd9fe7589..188990c3a514 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -333,6 +333,105 @@ __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
 
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine().  Putting them on the stack
+ * results in a stack frame overflow.  Dynamically allocating could result in a
+ * failure so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
+/*
+ * This cpu is going to be removed and its vectors migrated to the remaining
+ * online cpus.  Check to see if there are enough vectors in the remaining cpus.
+ * This function is protected by stop_machine().
+ */
+int check_irq_vectors_for_cpu_disable(void)
+{
+       unsigned int this_cpu, vector, this_count, count;
+       struct irq_desc *desc;
+       struct irq_data *data;
+       int cpu;
+
+       this_cpu = smp_processor_id();
+       cpumask_copy(&online_new, cpu_online_mask);
+       cpumask_clear_cpu(this_cpu, &online_new);
+
+       this_count = 0;
+       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+               desc = __this_cpu_read(vector_irq[vector]);
+               if (IS_ERR_OR_NULL(desc))
+                       continue;
+               /*
+                * Protect against concurrent action removal, affinity
+                * changes etc.
+                */
+               raw_spin_lock(&desc->lock);
+               data = irq_desc_get_irq_data(desc);
+               cpumask_copy(&affinity_new,
+                            irq_data_get_affinity_mask(data));
+               cpumask_clear_cpu(this_cpu, &affinity_new);
+
+               /* Do not count inactive or per-cpu irqs. */
+               if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
+                       raw_spin_unlock(&desc->lock);
+                       continue;
+               }
+
+               raw_spin_unlock(&desc->lock);
+               /*
+                * A single irq may be mapped to multiple cpu's
+                * vector_irq[] (for example IOAPIC cluster mode).  In
+                * this case we have two possibilities:
+                *
+                * 1) the resulting affinity mask is empty; that is
+                * this the down'd cpu is the last cpu in the irq's
+                * affinity mask, or
+                *
+                * 2) the resulting affinity mask is no longer a
+                * subset of the online cpus but the affinity mask is
+                * not zero; that is the down'd cpu is the last online
+                * cpu in a user set affinity mask.
+                */
+               if (cpumask_empty(&affinity_new) ||
+                   !cpumask_subset(&affinity_new, &online_new))
+                       this_count++;
+       }
+       /* No need to check any further. */
+       if (!this_count)
+               return 0;
+
+       count = 0;
+       for_each_online_cpu(cpu) {
+               if (cpu == this_cpu)
+                       continue;
+               /*
+                * We scan from FIRST_EXTERNAL_VECTOR to first system
+                * vector. If the vector is marked in the used vectors
+                * bitmap or an irq is assigned to it, we don't count
+                * it as available.
+                *
+                * As this is an inaccurate snapshot anyway, we can do
+                * this w/o holding vector_lock.
+                */
+               for (vector = FIRST_EXTERNAL_VECTOR;
+                    vector < FIRST_SYSTEM_VECTOR; vector++) {
+                       if (!test_bit(vector, system_vectors) &&
+                           IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
+                               if (++count == this_count)
+                                       return 0;
+                       }
+               }
+       }
+
+       if (count < this_count) {
+               pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
+                       this_cpu, this_count, count);
+               return -ERANGE;
+       }
+       return 0;
+}
+
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
 {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3d01df7d7cf6..bdcfc0964b1f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1478,7 +1478,7 @@ int native_cpu_disable(void)
 {
        int ret;
 
-       ret = lapic_can_unplug_cpu();
+       ret = check_irq_vectors_for_cpu_disable();
        if (ret)
                return ret;
 
-- 
2.15.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to