Module: xenomai-3
Branch: stable-3.0.x
Commit: 1dfe87355429f9d08eb5837b1f56f5de96a8dc95
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=1dfe87355429f9d08eb5837b1f56f5de96a8dc95

Author: Philippe Gerum <r...@xenomai.org>
Date:   Sun Nov  8 20:45:12 2015 +0100

cobalt/kernel: convert to current cpumask operators

---

 include/cobalt/kernel/sched.h       |    4 ++--
 kernel/cobalt/clock.c               |    3 +--
 kernel/cobalt/init.c                |    4 ++--
 kernel/cobalt/posix/process.c       |    8 ++++----
 kernel/cobalt/sched.c               |   22 +++++++++++-----------
 kernel/cobalt/thread.c              |   18 +++++++++---------
 kernel/cobalt/timer.c               |    2 +-
 kernel/drivers/testing/switchtest.c |    6 +++---
 8 files changed, 33 insertions(+), 34 deletions(-)

diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 1c11756..fe1df7d 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -221,7 +221,7 @@ static inline void xnsched_set_resched(struct xnsched 
*sched)
        if (current_sched == sched)
                current_sched->status |= XNRESCHED;
        else if (!xnsched_resched_p(sched)) {
-               cpu_set(xnsched_cpu(sched), current_sched->resched);
+               cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
                sched->status |= XNRESCHED;
                current_sched->status |= XNRESCHED;
        }
@@ -231,7 +231,7 @@ static inline void xnsched_set_resched(struct xnsched 
*sched)
 
 static inline int xnsched_supported_cpu(int cpu)
 {
-       return cpu_isset(cpu, xnsched_realtime_cpus);
+       return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
 }
 
 #else /* !CONFIG_SMP */
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index e75d296..5ee3eeb 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -188,8 +188,7 @@ void xnclock_core_local_shot(struct xnsched *sched)
 #ifdef CONFIG_SMP
 void xnclock_core_remote_shot(struct xnsched *sched)
 {
-       cpumask_t mask = cpumask_of_cpu(xnsched_cpu(sched));
-       ipipe_send_ipi(IPIPE_HRTIMER_IPI, mask);
+       ipipe_send_ipi(IPIPE_HRTIMER_IPI, *cpumask_of(xnsched_cpu(sched)));
 }
 #endif
 
diff --git a/kernel/cobalt/init.c b/kernel/cobalt/init.c
index d89e21d..cc77b37 100644
--- a/kernel/cobalt/init.c
+++ b/kernel/cobalt/init.c
@@ -339,10 +339,10 @@ static int __init xenomai_init(void)
        }
 
 #ifdef CONFIG_SMP
-       cpus_clear(xnsched_realtime_cpus);
+       cpumask_clear(&xnsched_realtime_cpus);
        for_each_online_cpu(cpu) {
                if (supported_cpus_arg & (1UL << cpu))
-                       cpu_set(cpu, xnsched_realtime_cpus);
+                       cpumask_set_cpu(cpu, &xnsched_realtime_cpus);
        }
        if (cpumask_empty(&xnsched_realtime_cpus)) {
                printk(XENO_WARNING "disabled via empty real-time CPU mask\n");
diff --git a/kernel/cobalt/posix/process.c b/kernel/cobalt/posix/process.c
index a9aeee5..788a24a 100644
--- a/kernel/cobalt/posix/process.c
+++ b/kernel/cobalt/posix/process.c
@@ -841,7 +841,7 @@ static int handle_setaffinity_event(struct 
ipipe_cpu_migration_data *d)
         * affinity mask accordingly.
         */
        xnlock_get_irqsave(&nklock, s);
-       cpus_and(thread->affinity, p->cpus_allowed, cobalt_cpu_affinity);
+       cpumask_and(&thread->affinity, &p->cpus_allowed, &cobalt_cpu_affinity);
        xnthread_run_handler_stack(thread, move_thread, d->dest_cpu);
        xnlock_put_irqrestore(&nklock, s);
 
@@ -857,7 +857,7 @@ static int handle_setaffinity_event(struct 
ipipe_cpu_migration_data *d)
         * (i.e. fully cleared) affinity mask until it leaves primary
         * mode then switches back to it, in SMP configurations.
         */
-       if (cpus_empty(thread->affinity))
+       if (cpumask_empty(&thread->affinity))
                printk(XENO_WARNING "thread %s[%d] changed CPU affinity 
inconsistently\n",
                       thread->name, xnthread_host_pid(thread));
        else {
@@ -917,8 +917,8 @@ static inline void check_affinity(struct task_struct *p) /* 
nklocked, IRQs off *
         * which is not part of its original affinity mask
         * though. Assume user wants to extend this mask.
         */
-       if (!cpu_isset(cpu, thread->affinity))
-               cpu_set(cpu, thread->affinity);
+       if (!cpumask_test_cpu(cpu, &thread->affinity))
+               cpumask_set_cpu(cpu, &thread->affinity);
 
        xnthread_migrate_passive(thread, sched);
 }
diff --git a/kernel/cobalt/sched.c b/kernel/cobalt/sched.c
index fab0d7c..0b7b93c 100644
--- a/kernel/cobalt/sched.c
+++ b/kernel/cobalt/sched.c
@@ -162,7 +162,7 @@ void xnsched_init(struct xnsched *sched, int cpu)
        ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu);
        ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu);
        ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu);
-       cpus_clear(sched->resched);
+       cpumask_clear(&sched->resched);
 #else
        strcpy(htimer_name, "[host-timer]");
        strcpy(rrbtimer_name, "[rrb-timer]");
@@ -181,7 +181,7 @@ void xnsched_init(struct xnsched *sched, int cpu)
        attr.flags = XNROOT | XNFPU;
        attr.name = root_name;
        attr.personality = &xenomai_personality;
-       attr.affinity = cpumask_of_cpu(cpu);
+       attr.affinity = *cpumask_of(cpu);
        param.idle.prio = XNSCHED_IDLE_PRIO;
 
        __xnthread_init(&sched->rootcb, &attr,
@@ -772,10 +772,10 @@ static inline int test_resched(struct xnsched *sched)
        int resched = xnsched_resched_p(sched);
 #ifdef CONFIG_SMP
        /* Send resched IPI to remote CPU(s). */
-       if (unlikely(!cpus_empty(sched->resched))) {
+       if (unlikely(!cpumask_empty(&sched->resched))) {
                smp_mb();
                ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, sched->resched);
-               cpus_clear(sched->resched);
+               cpumask_clear(&sched->resched);
        }
 #endif
        sched->status &= ~XNRESCHED;
@@ -1304,7 +1304,7 @@ static int affinity_vfile_show(struct 
xnvfile_regular_iterator *it,
        int cpu;
 
        for (cpu = 0; cpu < BITS_PER_LONG; cpu++)
-               if (cpu_isset(cpu, cobalt_cpu_affinity))
+               if (cpumask_test_cpu(cpu, &cobalt_cpu_affinity))
                        val |= (1UL << cpu);
 
        xnvfile_printf(it, "%08lx\n", val);
@@ -1327,23 +1327,23 @@ static ssize_t affinity_vfile_store(struct 
xnvfile_input *input)
        if (val == 0)
                affinity = xnsched_realtime_cpus; /* Reset to default. */
        else {
-               cpus_clear(affinity);
+               cpumask_clear(&affinity);
                for (cpu = 0; cpu < BITS_PER_LONG; cpu++, val >>= 1) {
                        if (val & 1)
-                               cpu_set(cpu, affinity);
+                               cpumask_set_cpu(cpu, &affinity);
                }
        }
 
-       cpus_and(set, affinity, *cpu_online_mask);
-       if (cpus_empty(set))
+       cpumask_and(&set, &affinity, cpu_online_mask);
+       if (cpumask_empty(&set))
                return -EINVAL;
 
        /*
         * The new dynamic affinity must be a strict subset of the
         * static set of supported CPUs.
         */
-       cpus_or(set, affinity, xnsched_realtime_cpus);
-       if (!cpus_equal(set, xnsched_realtime_cpus))
+       cpumask_or(&set, &affinity, &xnsched_realtime_cpus);
+       if (!cpumask_equal(&set, &xnsched_realtime_cpus))
                return -EINVAL;
 
        xnlock_get_irqsave(&nklock, s);
diff --git a/kernel/cobalt/thread.c b/kernel/cobalt/thread.c
index 5fdf919..bffa5b8 100644
--- a/kernel/cobalt/thread.c
+++ b/kernel/cobalt/thread.c
@@ -175,7 +175,7 @@ int __xnthread_init(struct xnthread *thread,
                flags |= XNDEBUG;
 
        thread->personality = attr->personality;
-       cpus_and(thread->affinity, attr->affinity, cobalt_cpu_affinity);
+       cpumask_and(&thread->affinity, &attr->affinity, &cobalt_cpu_affinity);
        thread->sched = sched;
        thread->state = flags;
        thread->info = 0;
@@ -620,11 +620,11 @@ int xnthread_init(struct xnthread *thread,
         * affinity mask, and therefore also part of the supported
         * CPUs. This CPU may change in pin_to_initial_cpu().
         */
-       cpus_and(affinity, attr->affinity, cobalt_cpu_affinity);
-       if (cpus_empty(affinity))
+       cpumask_and(&affinity, &attr->affinity, &cobalt_cpu_affinity);
+       if (cpumask_empty(&affinity))
                return -EINVAL;
 
-       sched = xnsched_struct(first_cpu(affinity));
+       sched = xnsched_struct(cpumask_first(&affinity));
 
        ret = __xnthread_init(thread, attr, sched, sched_class, sched_param);
        if (ret)
@@ -1691,7 +1691,7 @@ int xnthread_migrate(int cpu)
                goto unlock_and_exit;
        }
 
-       if (!cpu_isset(cpu, curr->affinity)) {
+       if (!cpumask_test_cpu(cpu, &curr->affinity)) {
                ret = -EINVAL;
                goto unlock_and_exit;
        }
@@ -2087,7 +2087,7 @@ void xnthread_relax(int notify, int reason)
        if (xnthread_test_localinfo(thread, XNMOVED)) {
                xnthread_clear_localinfo(thread, XNMOVED);
                cpu = xnsched_cpu(thread->sched);
-               set_cpus_allowed(p, cpumask_of_cpu(cpu));
+               set_cpus_allowed(p, *cpumask_of(cpu));
        }
 #endif
 
@@ -2332,10 +2332,10 @@ void xnthread_pin_initial(struct xnthread *thread)
         * to the first CPU of that mask.
         */
        cpu = task_cpu(p);
-       if (!cpu_isset(cpu, thread->affinity))
-               cpu = first_cpu(thread->affinity);
+       if (!cpumask_test_cpu(cpu, &thread->affinity))
+               cpu = cpumask_first(&thread->affinity);
 
-       set_cpus_allowed(p, cpumask_of_cpu(cpu));
+       set_cpus_allowed(p, *cpumask_of(cpu));
        /*
         * @thread is still unstarted Xenomai-wise, we are precisely
         * in the process of mapping the current kernel task to
diff --git a/kernel/cobalt/timer.c b/kernel/cobalt/timer.c
index bd986b6..526c615 100644
--- a/kernel/cobalt/timer.c
+++ b/kernel/cobalt/timer.c
@@ -359,7 +359,7 @@ void __xntimer_init(struct xntimer *timer,
        else {
                cpu = ipipe_processor_id();
                if (!xnsched_supported_cpu(cpu))
-                       cpu = first_cpu(xnsched_realtime_cpus);
+                       cpu = cpumask_first(&xnsched_realtime_cpus);
 
                timer->sched = xnsched_struct(cpu);
        }
diff --git a/kernel/drivers/testing/switchtest.c 
b/kernel/drivers/testing/switchtest.c
index a93a53a..976b1f8 100644
--- a/kernel/drivers/testing/switchtest.c
+++ b/kernel/drivers/testing/switchtest.c
@@ -492,10 +492,10 @@ static int rtswitch_create_ktask(struct rtswitch_context 
*ctx,
        iattr.name = name;
        iattr.flags = init_flags;
        iattr.personality = &xenomai_personality;
-       iattr.affinity = cpumask_of_cpu(ctx->cpu);
+       iattr.affinity = *cpumask_of(ctx->cpu);
        param.rt.prio = 1;
 
-       set_cpus_allowed(current, cpumask_of_cpu(ctx->cpu));
+       set_cpus_allowed(current, *cpumask_of(ctx->cpu));
 
        err = xnthread_init(&task->ktask,
                            &iattr, &xnsched_class_rt, &param);
@@ -550,7 +550,7 @@ static void rtswitch_close(struct rtdm_fd *fd)
        unsigned int i;
 
        if (ctx->tasks) {
-               set_cpus_allowed(current, cpumask_of_cpu(ctx->cpu));
+               set_cpus_allowed(current, *cpumask_of(ctx->cpu));
 
                for (i = 0; i < ctx->next_index; i++) {
                        struct rtswitch_task *task = &ctx->tasks[i];


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://xenomai.org/mailman/listinfo/xenomai-git

Reply via email to