Module: xenomai-forge
Branch: master
Commit: ea9cb55c849ea66a5063e589d6c116688f5518fd
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=ea9cb55c849ea66a5063e589d6c116688f5518fd

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sat Jun 15 13:27:46 2013 +0200

nucleus: fix CPU-indexed iterations

Online CPU ids may be spread over the possible CPU map. Use
for_each_online_cpu() where appropriate, and stop assuming that online
CPU ids range contiguously from 0 to num_online_cpus()-1.
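
For illustration only (this block is not part of the patch), a minimal
sketch of the two iteration styles in kernel context. The per-CPU array
example_data[] and the helper names are hypothetical; for_each_online_cpu(),
num_online_cpus() and the cpumask helpers are the stock kernel APIs.

    #include <linux/cpumask.h>  /* for_each_online_cpu(), cpumask helpers */

    static int example_data[NR_CPUS];  /* hypothetical per-CPU storage */

    /* Broken: assumes online CPU ids are contiguous and start at 0. */
    static void reset_data_broken(void)
    {
            int cpu;

            for (cpu = 0; cpu < num_online_cpus(); cpu++)
                    example_data[cpu] = 0;  /* misses CPU 2 if only CPUs 0 and 2 are online */
    }

    /* Fixed: visits exactly the CPUs which are currently online. */
    static void reset_data_fixed(void)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    example_data[cpu] = 0;
    }

    /*
     * Find the next online CPU strictly after @prev, wrapping back to
     * the first online CPU at the end of the map (same intent as the
     * scan added to xnintr_query_next() below).
     */
    static int next_online_cpu_wrapped(int prev)
    {
            int cpu = cpumask_next(prev, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    cpu = cpumask_first(cpu_online_mask);

            return cpu;
    }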

---

 kernel/cobalt/nucleus/heap.c   |    4 ++--
 kernel/cobalt/nucleus/intr.c   |   23 ++++++++++++++---------
 kernel/cobalt/nucleus/module.c |    3 ++-
 kernel/cobalt/nucleus/pod.c    |   15 +++++++--------
 kernel/cobalt/nucleus/timer.c  |   15 +++++++--------
 5 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/kernel/cobalt/nucleus/heap.c b/kernel/cobalt/nucleus/heap.c
index 811a5e1..e49b542 100644
--- a/kernel/cobalt/nucleus/heap.c
+++ b/kernel/cobalt/nucleus/heap.c
@@ -254,9 +254,9 @@ static void init_extent(xnheap_t *heap, xnextent_t *extent)
 int xnheap_init(xnheap_t *heap,
                void *heapaddr, u_long heapsize, u_long pagesize)
 {
-       unsigned cpu, nr_cpus = num_online_cpus();
        u_long hdrsize, shiftsize, pageshift;
        xnextent_t *extent;
+       unsigned int cpu;
        spl_t s;
 
        /*
@@ -311,7 +311,7 @@ int xnheap_init(xnheap_t *heap,
 
        heap->ubytes = 0;
        heap->maxcont = heap->npages * pagesize;
-       for (cpu = 0; cpu < nr_cpus; cpu++)
+       for_each_online_cpu(cpu)
                heap->idleq[cpu] = NULL;
        inith(&heap->link);
        inith(&heap->stat_link);
diff --git a/kernel/cobalt/nucleus/intr.c b/kernel/cobalt/nucleus/intr.c
index b84c7b5..4a87517 100644
--- a/kernel/cobalt/nucleus/intr.c
+++ b/kernel/cobalt/nucleus/intr.c
@@ -908,8 +908,8 @@ int xnintr_query_init(xnintr_iterator_t *iterator)
         * xnintr_list_rev and old xnintr_count here. The other way
         * around is not a problem as xnintr_query() will notice this
         * fact later.  Should xnintr_list_rev change later,
-        * xnintr_query() will trigger an appropriate error below. */
-
+        * xnintr_query() will trigger an appropriate error below.
+        */
        iterator->list_rev = xnintr_list_rev;
        xnarch_memory_barrier();
 
@@ -918,13 +918,16 @@ int xnintr_query_init(xnintr_iterator_t *iterator)
 
 int xnintr_query_next(int irq, xnintr_iterator_t *iterator, char *name_buf)
 {
-       int cpu = iterator->cpu + 1;
        xnticks_t last_switch;
+       int ret = 0, cpu;
        xnintr_t *intr;
-       int ret = 0;
        spl_t s;
 
-       if (cpu == num_online_cpus())
+       for (cpu = iterator->cpu + 1; cpu < num_present_cpus(); ++cpu) {
+               if (cpu_online(cpu))
+                       break;
+       }
+       if (cpu == num_present_cpus())
                cpu = 0;
        iterator->cpu = cpu;
 
@@ -943,7 +946,7 @@ int xnintr_query_next(int irq, xnintr_iterator_t *iterator, char *name_buf)
        } else
                intr = xnintr_shirq_next(iterator->prev);
 
-       if (!intr) {
+       if (intr == NULL) {
                cpu = -1;
                iterator->prev = NULL;
                ret = -ENODEV;
@@ -965,9 +968,11 @@ int xnintr_query_next(int irq, xnintr_iterator_t *iterator, char *name_buf)
        intr->stat[cpu].account.total = 0;
        intr->stat[cpu].account.start = last_switch;
 
-       /* Proceed to next entry in shared IRQ chain when all CPUs
-        * have been visited for this one. */
-       if (cpu + 1 == num_online_cpus())
+       /*
+        * Proceed to next entry in shared IRQ chain when all CPUs
+        * have been visited for this one.
+        */
+       if (cpu + 1 == num_present_cpus())
                iterator->prev = intr;
 
      unlock_and_exit:
diff --git a/kernel/cobalt/nucleus/module.c b/kernel/cobalt/nucleus/module.c
index 1f572af..545bdc3 100644
--- a/kernel/cobalt/nucleus/module.c
+++ b/kernel/cobalt/nucleus/module.c
@@ -82,9 +82,10 @@ static int __init mach_setup(void)
 
 #ifdef CONFIG_SMP
        cpus_clear(xnarch_machdata.supported_cpus);
-       for (cpu = 0; cpu < num_online_cpus(); cpu++)
+       for_each_online_cpu(cpu) {
                if (supported_cpus_arg & (1UL << cpu))
                        cpu_set(cpu, xnarch_machdata.supported_cpus);
+       }
 #endif /* CONFIG_SMP */
 
        ret = ipipe_select_timers(&xnarch_supported_cpus);
diff --git a/kernel/cobalt/nucleus/pod.c b/kernel/cobalt/nucleus/pod.c
index f0d26b7..684c5ef 100644
--- a/kernel/cobalt/nucleus/pod.c
+++ b/kernel/cobalt/nucleus/pod.c
@@ -145,7 +145,6 @@ static inline void __xnpod_switch_fpu(struct xnsched *sched)
 void xnpod_fatal(const char *format, ...)
 {
        static char msg_buf[1024];
-       const unsigned nr_cpus = num_online_cpus();
        struct xnthread *thread;
        xnholder_t *holder;
        xnsched_t *sched;
@@ -175,7 +174,7 @@ void xnpod_fatal(const char *format, ...)
        printk(KERN_ERR "\n %-3s  %-6s %-8s %-8s %-8s  %s\n",
               "CPU", "PID", "PRI", "TIMEOUT", "STAT", "NAME");
 
-       for (cpu = 0; cpu < nr_cpus; ++cpu) {
+       for_each_online_cpu(cpu) {
                sched = xnpod_sched_slot(cpu);
 
                holder = getheadq(&nkpod->threadq);
@@ -255,11 +254,10 @@ static void xnpod_flush_heap(xnheap_t *heap,
 int xnpod_init(void)
 {
        extern int xeno_nucleus_status;
-       int cpu, nr_cpus = num_online_cpus();
        struct xnsched *sched;
        struct xnpod *pod;
        void *heapaddr;
-       int ret;
+       int ret, cpu;
        spl_t s;
 
        if (xeno_nucleus_status < 0)
@@ -291,7 +289,7 @@ int xnpod_init(void)
        }
        xnheap_set_label(&kheap, "main heap");
 
-       for (cpu = 0; cpu < nr_cpus; ++cpu) {
+       for_each_online_cpu(cpu) {
                sched = &pod->sched[cpu];
                xnsched_init(sched, cpu);
                if (xnarch_cpu_supported(cpu))
@@ -383,7 +381,7 @@ void xnpod_shutdown(int xtype)
 
        __clrbits(nkpod->status, XNPEXEC);
 
-       for (cpu = 0; cpu < num_online_cpus(); cpu++) {
+       for_each_online_cpu(cpu) {
                sched = xnpod_sched_slot(cpu);
                xnsched_destroy(sched);
        }
@@ -2116,7 +2114,7 @@ int xnpod_enable_timesource(void)
        nkclock.wallclock_offset =
                xnclock_get_host_time() - xnclock_read_monotonic();
 
-       for (cpu = 0; cpu < num_online_cpus(); cpu++) {
+       for_each_online_cpu(cpu) {
 
                if (!xnarch_cpu_supported(cpu))
                        continue;
@@ -2200,9 +2198,10 @@ void xnpod_disable_timesource(void)
         * timer, since this could cause deadlock situations to arise
         * on SMP systems.
         */
-       for (cpu = 0; cpu < num_online_cpus(); cpu++)
+       for_each_online_cpu(cpu) {
                if (xnarch_cpu_supported(cpu))
                        xntimer_release_hardware(cpu);
+       }
 
        xntimer_freeze();
 
diff --git a/kernel/cobalt/nucleus/timer.c b/kernel/cobalt/nucleus/timer.c
index cea7c5f..cf32869 100644
--- a/kernel/cobalt/nucleus/timer.c
+++ b/kernel/cobalt/nucleus/timer.c
@@ -196,12 +196,12 @@ enqueue:
 
 void xntimer_adjust_all(xnsticks_t delta)
 {
-       unsigned cpu, nr_cpus;
+       unsigned int cpu;
        xnqueue_t adjq;
 
        initq(&adjq);
        delta = xnarch_ns_to_tsc(delta);
-       for (cpu = 0, nr_cpus = num_online_cpus(); cpu < nr_cpus; cpu++) {
+       for_each_online_cpu (cpu) {
                xnsched_t *sched = xnpod_sched_slot(cpu);
                xntimerq_t *q = &sched->timerqueue;
                xnholder_t *adjholder;
@@ -783,18 +783,17 @@ EXPORT_SYMBOL_GPL(xntimer_get_overruns);
 
 void xntimer_freeze(void)
 {
-       int nr_cpus, cpu;
+       xntimerq_t *timerq;
+       xntimerh_t *holder;
+       int cpu;
        spl_t s;
 
        trace_mark(xn_nucleus, timer_freeze, MARK_NOARGS);
 
        xnlock_get_irqsave(&nklock, s);
 
-       nr_cpus = num_online_cpus();
-
-       for (cpu = 0; cpu < nr_cpus; cpu++) {
-               xntimerq_t *timerq = &xnpod_sched_slot(cpu)->timerqueue;
-               xntimerh_t *holder;
+       for_each_online_cpu(cpu) {
+               timerq = &xnpod_sched_slot(cpu)->timerqueue;
                while ((holder = xntimerq_head(timerq)) != NULL) {
                        __setbits(aplink2timer(holder)->status, XNTIMER_DEQUEUED);
                        xntimerq_remove(timerq, holder);


