The vector will only ever target a single CPU at a time, so passing a CPU mask is not needed. Simplify the interface to take a single CPU index instead, and adjust the callers accordingly.
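
For illustration, callers that previously wrapped the target CPU in a
cpumask now pass the CPU number directly; a sketch based on the hpet.c
hunk below:

    /* Before: the single target CPU had to be wrapped in a cpumask. */
    ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, cpumask_of(0));

    /* After: pass the CPU number itself (CPU 0, i.e. the BSP). */
    ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, 0);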
Signed-off-by: Roger Pau Monné <[email protected]>
---
 xen/arch/x86/hpet.c            |  2 +-
 xen/arch/x86/include/asm/irq.h |  2 +-
 xen/arch/x86/io_apic.c         |  2 +-
 xen/arch/x86/irq.c             | 23 ++++++++++-------------
 4 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index a69abe2650a8..abf4eaf86db1 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -352,7 +352,7 @@ static int __init hpet_setup_msi_irq(struct hpet_event_channel *ch)
      * Technically we don't want to bind the IRQ to any CPU yet, but we need to
      * specify at least one online one here. Use the BSP.
      */
-    ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, cpumask_of(0));
+    ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, 0);
     if ( ret )
         return ret;
     cpumask_setall(desc->affinity);
diff --git a/xen/arch/x86/include/asm/irq.h b/xen/arch/x86/include/asm/irq.h
index df7b48c8653e..355332188932 100644
--- a/xen/arch/x86/include/asm/irq.h
+++ b/xen/arch/x86/include/asm/irq.h
@@ -199,7 +199,7 @@ void setup_vector_irq(unsigned int cpu);
 void move_native_irq(struct irq_desc *desc);
 void move_masked_irq(struct irq_desc *desc);
 
-int bind_irq_vector(int irq, int vector, const cpumask_t *mask);
+int bind_irq_vector(int irq, int vector, unsigned int cpu);
 
 void cf_check end_nonmaskable_irq(struct irq_desc *desc, uint8_t vector);
 void irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask);
diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index 19960d291c47..dfbe27b12d54 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -1859,7 +1859,7 @@ static void __init check_timer(void)
     vector = IRQ0_VECTOR;
     clear_irq_vector(0);
 
-    if ((ret = bind_irq_vector(0, vector, &cpumask_all)))
+    if ((ret = bind_irq_vector(0, vector, smp_processor_id())))
         printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
 
     irq_desc[0].status &= ~IRQ_DISABLED;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 5cd934ea2a32..e09559fce856 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -150,26 +150,23 @@ static void trace_irq_mask(uint32_t event, int irq, int vector,
 }
 
 static int __init _bind_irq_vector(struct irq_desc *desc, int vector,
-                                   const cpumask_t *cpu_mask)
+                                   unsigned int cpu)
 {
-    cpumask_t online_mask;
-    int cpu;
-
     BUG_ON((unsigned)vector >= X86_IDT_VECTORS);
 
-    cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
-    if (cpumask_empty(&online_mask))
+    if ( !cpu_online(cpu) )
         return -EINVAL;
     if ( (desc->arch.vector == vector) &&
-         cpumask_equal(desc->arch.cpu_mask, &online_mask) )
+         cpumask_test_cpu(cpu, desc->arch.cpu_mask) )
         return 0;
     if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED )
         return -EBUSY;
-    trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, desc->irq, vector, &online_mask);
-    for_each_cpu(cpu, &online_mask)
-        per_cpu(vector_irq, cpu)[vector] = desc->irq;
+
+    trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, desc->irq, vector, cpumask_of(cpu));
+    per_cpu(vector_irq, cpu)[vector] = desc->irq;
     desc->arch.vector = vector;
-    cpumask_copy(desc->arch.cpu_mask, &online_mask);
+    cpumask_clear(desc->arch.cpu_mask);
+    cpumask_set_cpu(cpu, desc->arch.cpu_mask);
     if ( desc->arch.used_vectors )
     {
         ASSERT(!test_bit(vector, desc->arch.used_vectors));
@@ -179,7 +176,7 @@ static int __init _bind_irq_vector(struct irq_desc *desc, int vector,
     return 0;
 }
 
-int __init bind_irq_vector(int irq, int vector, const cpumask_t *mask)
+int __init bind_irq_vector(int irq, int vector, unsigned int cpu)
 {
     struct irq_desc *desc = irq_to_desc(irq);
     unsigned long flags;
@@ -189,7 +186,7 @@ int __init bind_irq_vector(int irq, int vector, const cpumask_t *mask)
 
     spin_lock_irqsave(&desc->lock, flags);
     spin_lock(&vector_lock);
-    ret = _bind_irq_vector(desc, vector, mask);
+    ret = _bind_irq_vector(desc, vector, cpu);
     spin_unlock(&vector_lock);
     spin_unlock_irqrestore(&desc->lock, flags);
 
-- 
2.51.0
