Gitweb:     
http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7ccb4a662462616f6be5053e26b79580e02f1529
Commit:     7ccb4a662462616f6be5053e26b79580e02f1529
Parent:     5a26f6bbb767d7ad23311a1e81cfdd2bebefb855
Author:     Mohan Kumar M <[EMAIL PROTECTED]>
AuthorDate: Wed Jun 13 00:51:57 2007 +1000
Committer:  Paul Mackerras <[EMAIL PROTECTED]>
CommitDate: Mon Jun 25 17:03:31 2007 +1000

    [POWERPC] Fix interrupt distribution in ppc970
    
    In some of the PPC970 based systems, interrupt would be distributed to
    offline cpus also even when booted with "maxcpus=1".  So check whether
    cpu online map and cpu present map are equal or not.  If they are equal,
    default_distrib_server is used as the interrupt server; otherwise the
    boot cpu (default_server) is used as the interrupt server.
    
    In addition to this, if an interrupt is assigned to a specific cpu (ie
    smp affinity) and if that cpu is not online, the earlier code used to
    return the default_distrib_server as interrupt server.  This
    introduces an additional parameter to the get_irq function, called
    strict_check.  Based on this parameter, if the cpu is not online,
    either default_distrib_server or -1 is returned.
    
    Signed-off-by: Mohan Kumar M <[EMAIL PROTECTED]>
    Cc: Michael Ellerman <[EMAIL PROTECTED]>
    Acked-by: Milton Miller <[EMAIL PROTECTED]>
    Signed-off-by: Paul Mackerras <[EMAIL PROTECTED]>
---
 arch/powerpc/platforms/pseries/xics.c |   53 ++++++++++++++++++---------------
 1 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/xics.c 
b/arch/powerpc/platforms/pseries/xics.c
index f1df942..5bd90a7 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -156,9 +156,9 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
 
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq)
+static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
-       unsigned int server;
+       int server;
        /* For the moment only implement delivery to all cpus or one cpu */
        cpumask_t cpumask = irq_desc[virq].affinity;
        cpumask_t tmp = CPU_MASK_NONE;
@@ -166,22 +166,25 @@ static int get_irq_server(unsigned int virq)
        if (!distribute_irqs)
                return default_server;
 
-       if (cpus_equal(cpumask, CPU_MASK_ALL)) {
-               server = default_distrib_server;
-       } else {
+       if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
                cpus_and(tmp, cpu_online_map, cpumask);
 
-               if (cpus_empty(tmp))
-                       server = default_distrib_server;
-               else
-                       server = get_hard_smp_processor_id(first_cpu(tmp));
+               server = first_cpu(tmp);
+
+               if (server < NR_CPUS)
+                       return get_hard_smp_processor_id(server);
+
+               if (strict_check)
+                       return -1;
        }
 
-       return server;
+       if (cpus_equal(cpu_online_map, cpu_present_map))
+               return default_distrib_server;
 
+       return default_server;
 }
 #else
-static int get_irq_server(unsigned int virq)
+static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
        return default_server;
 }
@@ -192,7 +195,7 @@ static void xics_unmask_irq(unsigned int virq)
 {
        unsigned int irq;
        int call_status;
-       unsigned int server;
+       int server;
 
        pr_debug("xics: unmask virq %d\n", virq);
 
@@ -201,7 +204,7 @@ static void xics_unmask_irq(unsigned int virq)
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                return;
 
-       server = get_irq_server(virq);
+       server = get_irq_server(virq, 0);
 
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
@@ -398,8 +401,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t 
cpumask)
        unsigned int irq;
        int status;
        int xics_status[2];
-       unsigned long newmask;
-       cpumask_t tmp = CPU_MASK_NONE;
+       int irq_server;
 
        irq = (unsigned int)irq_map[virq].hwirq;
        if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
@@ -413,18 +415,21 @@ static void xics_set_affinity(unsigned int virq, 
cpumask_t cpumask)
                return;
        }
 
-       /* For the moment only implement delivery to all cpus or one cpu */
-       if (cpus_equal(cpumask, CPU_MASK_ALL)) {
-               newmask = default_distrib_server;
-       } else {
-               cpus_and(tmp, cpu_online_map, cpumask);
-               if (cpus_empty(tmp))
-                       return;
-               newmask = get_hard_smp_processor_id(first_cpu(tmp));
+       /*
+        * For the moment only implement delivery to all cpus or one cpu.
+        * Get current irq_server for the given irq
+        */
+       irq_server = get_irq_server(irq, 1);
+       if (irq_server == -1) {
+               char cpulist[128];
+               cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
+               printk(KERN_WARNING "xics_set_affinity: No online cpus in "
+                               "the mask %s for irq %d\n", cpulist, virq);
+               return;
        }
 
        status = rtas_call(ibm_set_xive, 3, 1, NULL,
-                               irq, newmask, xics_status[1]);
+                               irq, irq_server, xics_status[1]);
 
        if (status) {
                printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
-
To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to