Module: xenomai-forge
Branch: master
Commit: da0c27ab99f342d73774d3e3db84ef9386caa96d
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=da0c27ab99f342d73774d3e3db84ef9386caa96d

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Jun 13 16:34:10 2013 +0200

cobalt: sanitize atomic operation API

---

 include/asm-generic/atomic.h     |   42 +++++++++++--------------------------
 include/cobalt/nucleus/pod.h     |    2 +-
 include/cobalt/nucleus/stat.h    |    2 +-
 include/cobalt/nucleus/synch.h   |   16 +++++++-------
 include/cobalt/nucleus/sys_ppd.h |   21 ++++++++++++++++++-
 include/cobalt/nucleus/thread.h  |    2 +-
 kernel/cobalt/monitor.h          |    2 +-
 kernel/cobalt/mutex.h            |    4 +-
 kernel/cobalt/nucleus/pod.c      |    2 +-
 kernel/cobalt/nucleus/shadow.c   |   26 +++++++++++-----------
 kernel/cobalt/nucleus/synch.c    |   22 ++++++++++----------
 lib/cobalt/printf.c              |   26 +++++++++++-----------
 12 files changed, 85 insertions(+), 82 deletions(-)

diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index a7d05d2..bf1c9cd 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -30,26 +30,10 @@ typedef unsigned long atomic_flags_t;
 #include <asm/atomic.h>
 #include <asm/xenomai/wrappers.h>
 
-typedef atomic_long_t atomic_counter_t;
-typedef atomic_long_t xnarch_atomic_t;
-
 #define xnarch_memory_barrier()                smp_mb()
 #define xnarch_read_memory_barrier()    rmb()
 #define xnarch_write_memory_barrier()   wmb()
 
-#define xnarch_atomic_set(pcounter,i)  atomic_long_set(pcounter,i)
-#define xnarch_atomic_get(pcounter)    atomic_long_read(pcounter)
-#define xnarch_atomic_inc(pcounter)    atomic_long_inc(pcounter)
-#define xnarch_atomic_dec(pcounter)    atomic_long_dec(pcounter)
-#define xnarch_atomic_inc_and_test(pcounter) \
-       atomic_long_inc_and_test(pcounter)
-#define xnarch_atomic_dec_and_test(pcounter) \
-       atomic_long_dec_and_test(pcounter)
-#define xnarch_atomic_cmpxchg(pcounter,old,new) \
-       atomic_long_cmpxchg((pcounter),(old),(new))
-
-#define xnarch_atomic_xchg(ptr,x)      xchg(ptr,x)
-
 /* atomic_set_mask, atomic_clear_mask are not standard among linux
    ports */
 #ifndef xnarch_atomic_set_mask
@@ -61,12 +45,12 @@ typedef atomic_long_t xnarch_atomic_t;
 #endif
 
 #else /* !__KERNEL__ */
+
 #include <xeno_config.h>
 
-#ifndef xnarch_atomic_t
-typedef struct { unsigned long counter; } __xnarch_atomic_t;
-#define xnarch_atomic_t __xnarch_atomic_t
-#endif
+typedef struct {
+       unsigned long v;
+} atomic_long_t;
 
 #ifndef xnarch_memory_barrier
 #define xnarch_memory_barrier() __sync_synchronize()
@@ -84,19 +68,19 @@ typedef struct { unsigned long counter; } __xnarch_atomic_t;
 #define cpu_relax() xnarch_memory_barrier()
 #endif
 
-#ifndef xnarch_atomic_get
-#define xnarch_atomic_get(v)           ((v)->counter)
+#ifndef atomic_long_read
+#define atomic_long_read(p)            ((p)->v)
 #endif
 
-#ifndef xnarch_atomic_set
-#define xnarch_atomic_set(v,i)         (((v)->counter) = (i))
+#ifndef atomic_long_set
+#define atomic_long_set(p, i)          ((p)->v = i)
 #endif
 
-#ifndef xnarch_atomic_cmpxchg
-#define xnarch_atomic_cmpxchg(v, o, n)                 \
-       __sync_val_compare_and_swap(&(v)->counter,      \
-                                   (unsigned long)(o), \
-                                   (unsigned long)(n))
+#ifndef atomic_long_cmpxchg
+#define atomic_long_cmpxchg(p, o, n)                           \
+       __sync_val_compare_and_swap(&(p)->v,                    \
+                                   (typeof((p)->v))(o),        \
+                                   (typeof((p)->v))(n))
 #endif
 
 #endif /* !__KERNEL__ */
diff --git a/include/cobalt/nucleus/pod.h b/include/cobalt/nucleus/pod.h
index 941747f..c4b1bc3 100644
--- a/include/cobalt/nucleus/pod.h
+++ b/include/cobalt/nucleus/pod.h
@@ -75,7 +75,7 @@ struct xnpod {
 #ifdef CONFIG_XENO_OPT_VFILE
        struct xnvfile_rev_tag threadlist_tag;
 #endif
-       atomic_counter_t timerlck; /*!< Timer lock depth.  */
+       atomic_t timerlck;      /*!< Timer lock depth.  */
 
        int refcnt;             /*!< Reference count.  */
 };
diff --git a/include/cobalt/nucleus/stat.h b/include/cobalt/nucleus/stat.h
index 010f4b1..61cfbb3 100644
--- a/include/cobalt/nucleus/stat.h
+++ b/include/cobalt/nucleus/stat.h
@@ -53,7 +53,7 @@ do { \
 #define xnstat_exectime_set_current(sched, new_account) \
 ({ \
        xnstat_exectime_t *__prev; \
-       __prev = xnarch_atomic_xchg(&(sched)->current_account, (new_account)); \
+       __prev = (xnstat_exectime_t *)atomic_long_xchg(&(sched)->current_account, (long)(new_account)); \
        __prev; \
 })
 
diff --git a/include/cobalt/nucleus/synch.h b/include/cobalt/nucleus/synch.h
index 3ca4f31..5e3a582 100644
--- a/include/cobalt/nucleus/synch.h
+++ b/include/cobalt/nucleus/synch.h
@@ -40,19 +40,19 @@
 #define XNSYNCH_FLCLAIM XN_HANDLE_SPARE3 /* Corresponding bit in fast lock */
 
 /* Fast lock API */
-static inline int xnsynch_fast_owner_check(xnarch_atomic_t *fastlock,
+static inline int xnsynch_fast_owner_check(atomic_long_t *fastlock,
                                           xnhandle_t ownerh)
 {
-       return (xnhandle_mask_spare(xnarch_atomic_get(fastlock)) == ownerh) ?
+       return (xnhandle_mask_spare(atomic_long_read(fastlock)) == ownerh) ?
                0 : -EPERM;
 }
 
-static inline int xnsynch_fast_acquire(xnarch_atomic_t *fastlock,
+static inline int xnsynch_fast_acquire(atomic_long_t *fastlock,
                                       xnhandle_t new_ownerh)
 {
        xnhandle_t h;
 
-       h = xnarch_atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
+       h = atomic_long_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
        if (h != XN_NO_HANDLE) {
                if (xnhandle_mask_spare(h) == new_ownerh)
                        return -EBUSY;
@@ -63,10 +63,10 @@ static inline int xnsynch_fast_acquire(xnarch_atomic_t *fastlock,
        return 0;
 }
 
-static inline int xnsynch_fast_release(xnarch_atomic_t *fastlock,
+static inline int xnsynch_fast_release(atomic_long_t *fastlock,
                                       xnhandle_t cur_ownerh)
 {
-       return (xnarch_atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE) ==
+       return (atomic_long_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE) ==
                cur_ownerh);
 }
 
@@ -108,7 +108,7 @@ typedef struct xnsynch {
 
     struct xnthread *owner; /* Thread which owns the resource */
 
-    xnarch_atomic_t *fastlock; /* Pointer to fast lock word */
+    atomic_long_t *fastlock; /* Pointer to fast lock word */
 
     void (*cleanup)(struct xnsynch *synch); /* Cleanup handler */
 
@@ -158,7 +158,7 @@ static inline void xnsynch_detect_claimed_relax(struct xnthread *owner)
 #endif /* !XENO_DEBUG(SYNCH_RELAX) */
 
 void xnsynch_init(struct xnsynch *synch, xnflags_t flags,
-                 xnarch_atomic_t *fastlock);
+                 atomic_long_t *fastlock);
 
 #define xnsynch_destroy(synch) xnsynch_flush(synch, XNRMID)
 
diff --git a/include/cobalt/nucleus/sys_ppd.h b/include/cobalt/nucleus/sys_ppd.h
index e60fd6d..7c00772 100644
--- a/include/cobalt/nucleus/sys_ppd.h
+++ b/include/cobalt/nucleus/sys_ppd.h
@@ -1,3 +1,22 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
 #ifndef _XENO_NUCLEUS_SYS_PPD_H
 #define _XENO_NUCLEUS_SYS_PPD_H
 
@@ -8,7 +27,7 @@ struct xnsys_ppd {
        struct xnshadow_ppd ppd;
        struct xnheap sem_heap;
        unsigned long mayday_addr;
-       xnarch_atomic_t refcnt;
+       atomic_t refcnt;
        char *exe_path;
 };
 
diff --git a/include/cobalt/nucleus/thread.h b/include/cobalt/nucleus/thread.h
index b27f893..6866864 100644
--- a/include/cobalt/nucleus/thread.h
+++ b/include/cobalt/nucleus/thread.h
@@ -390,7 +390,7 @@ void xnthread_set_sync_window(struct xnthread *thread, int bits)
 static inline struct xnthread *
 xnsynch_release(struct xnsynch *synch, struct xnthread *thread)
 {
-       xnarch_atomic_t *lockp;
+       atomic_long_t *lockp;
        xnhandle_t threadh;
 
        XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
diff --git a/kernel/cobalt/monitor.h b/kernel/cobalt/monitor.h
index 6c5af6a..849dfa2 100644
--- a/kernel/cobalt/monitor.h
+++ b/kernel/cobalt/monitor.h
@@ -28,7 +28,7 @@
 #define COBALT_MONITOR_PENDED     0x08
 
 struct cobalt_monitor_data {
-       xnarch_atomic_t owner;
+       atomic_long_t owner;
        unsigned long flags;
 };
 
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index 3aa1fea..0df4a84 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -25,7 +25,7 @@
 struct cobalt_mutex;
 
 struct mutex_dat {
-       xnarch_atomic_t owner;
+       atomic_long_t owner;
        unsigned long flags;
 
 #define COBALT_MUTEX_COND_SIGNAL 0x00000001
@@ -180,7 +180,7 @@ static inline struct mutex_dat *mutex_get_datp(struct __shadow_mutex *shadow)
        return shadow->dat;
 }
 
-static inline xnarch_atomic_t *mutex_get_ownerp(struct __shadow_mutex *shadow)
+static inline atomic_long_t *mutex_get_ownerp(struct __shadow_mutex *shadow)
 {
        return &mutex_get_datp(shadow)->owner;
 }
diff --git a/kernel/cobalt/nucleus/pod.c b/kernel/cobalt/nucleus/pod.c
index 1a252bd..8ca156f 100644
--- a/kernel/cobalt/nucleus/pod.c
+++ b/kernel/cobalt/nucleus/pod.c
@@ -279,7 +279,7 @@ int xnpod_init(void)
        pod->status = 0;
        pod->refcnt = 1;
        initq(&pod->threadq);
-       xnarch_atomic_set(&pod->timerlck, 0);
+       atomic_set(&pod->timerlck, 0);
 
        xnlock_put_irqrestore(&nklock, s);
 
diff --git a/kernel/cobalt/nucleus/shadow.c b/kernel/cobalt/nucleus/shadow.c
index 303bd6c..72a7651 100644
--- a/kernel/cobalt/nucleus/shadow.c
+++ b/kernel/cobalt/nucleus/shadow.c
@@ -283,13 +283,13 @@ static void request_syscall_restart(struct xnthread *thread,
 
 static inline void lock_timers(void)
 {
-       xnarch_atomic_inc(&nkpod->timerlck);
+       atomic_inc(&nkpod->timerlck);
        setbits(nkclock.status, XNTBLCK);
 }
 
 static inline void unlock_timers(void)
 {
-       if (xnarch_atomic_dec_and_test(&nkpod->timerlck))
+       if (atomic_dec_and_test(&nkpod->timerlck))
                clrbits(nkclock.status, XNTBLCK);
 }
 
@@ -298,14 +298,14 @@ static int enter_personality(struct xnpersonality *personality)
        if (personality->module && !try_module_get(personality->module))
                return -ENOSYS;
 
-       xnarch_atomic_inc(&personality->refcnt);
+       atomic_inc(&personality->refcnt);
 
        return 0;
 }
 
 static void leave_personality(struct xnpersonality *personality)
 {
-       xnarch_atomic_dec(&personality->refcnt);
+       atomic_dec(&personality->refcnt);
        if (personality->module)
                module_put(personality->module);
 }
@@ -883,7 +883,7 @@ int xnshadow_map_user(struct xnthread *thread,
        init_threadinfo(thread);
        xnthread_set_state(thread, XNMAPPED);
        xndebug_shadow_init(thread);
-       xnarch_atomic_inc(&sys_ppd->refcnt);
+       atomic_inc(&sys_ppd->refcnt);
        /*
         * ->map_thread() handler is invoked after the TCB is fully
         * built, and when we know for sure that current will go
@@ -1069,7 +1069,7 @@ void xnshadow_unmap(struct xnthread *thread)
 
        if (xnthread_test_state(thread, XNUSER)) {
                sys_ppd = xnsys_ppd_get(0);
-               xnarch_atomic_dec(&sys_ppd->refcnt);
+               atomic_dec(&sys_ppd->refcnt);
        }
 }
 
@@ -1645,8 +1645,8 @@ static struct xnshadow_ppd *user_process_attach(void)
                exe_path = NULL; /* Not lethal, but weird. */
        }
        p->exe_path = exe_path;
-       xnarch_atomic_set(&p->refcnt, 1);
-       xnarch_atomic_inc(&personalities[user_muxid]->refcnt);
+       atomic_set(&p->refcnt, 1);
+       atomic_inc(&personalities[user_muxid]->refcnt);
 
        return &p->ppd;
 }
@@ -1657,7 +1657,7 @@ static void user_process_detach(struct xnshadow_ppd *ppd)
 
        p = container_of(ppd, struct xnsys_ppd, ppd);
        xnheap_destroy_mapped(&p->sem_heap, post_ppd_release, NULL);
-       xnarch_atomic_dec(&personalities[user_muxid]->refcnt);
+       atomic_dec(&personalities[user_muxid]->refcnt);
 
        if (p->exe_path)
                kfree(p->exe_path);
@@ -1737,7 +1737,7 @@ int xnshadow_register_personality(struct xnpersonality *personality)
 
        for (muxid = 0; muxid < PERSONALITIES_NR; muxid++) {
                if (personalities[muxid] == NULL) {
-                       xnarch_atomic_set(&personality->refcnt, 0);
+                       atomic_set(&personality->refcnt, 0);
                        personalities[muxid] = personality;
                        break;
                }
@@ -1773,7 +1773,7 @@ int xnshadow_unregister_personality(int muxid)
        xnlock_get_irqsave(&nklock, s);
 
        personality = personalities[muxid];
-       if (xnarch_atomic_get(&personality->refcnt) > 0)
+       if (atomic_read(&personality->refcnt) > 0)
                ret = -EBUSY;
        else
                personalities[muxid] = NULL;
@@ -2229,7 +2229,7 @@ static int handle_taskexit_event(struct task_struct *p) /* p == current */
                xnheap_free(&sys_ppd->sem_heap, thread->u_window);
                thread->u_window = NULL;
                mm = xnshadow_current_mm();
-               if (!xnarch_atomic_get(&sys_ppd->refcnt))
+               if (atomic_read(&sys_ppd->refcnt) == 0)
                        ppd_remove_mm(mm, detach_ppd);
        }
 
@@ -2392,7 +2392,7 @@ static int handle_cleanup_event(struct mm_struct *mm)
                        handle_taskexit_event(current);
                        ipipe_disable_notifier(current);
                }
-               if (xnarch_atomic_dec_and_test(&sys_ppd->refcnt))
+               if (atomic_dec_and_test(&sys_ppd->refcnt))
                        ppd_remove_mm(mm, detach_ppd);
        }
 
diff --git a/kernel/cobalt/nucleus/synch.c b/kernel/cobalt/nucleus/synch.c
index aaecf05..b1a228f 100644
--- a/kernel/cobalt/nucleus/synch.c
+++ b/kernel/cobalt/nucleus/synch.c
@@ -42,7 +42,7 @@
 
 /*!
  * \fn void xnsynch_init(struct xnsynch *synch, xnflags_t flags,
- *                       xnarch_atomic_t *fastlock)
+ *                       atomic_long_t *fastlock)
  *
  * \brief Initialize a synchronization object.
  *
@@ -100,7 +100,7 @@
  * Rescheduling: never.
  */
 
-void xnsynch_init(struct xnsynch *synch, xnflags_t flags, xnarch_atomic_t *fastlock)
+void xnsynch_init(struct xnsynch *synch, xnflags_t flags, atomic_long_t *fastlock)
 {
        initph(&synch->link);
 
@@ -113,7 +113,7 @@ void xnsynch_init(struct xnsynch *synch, xnflags_t flags, xnarch_atomic_t *fastl
        if ((flags & XNSYNCH_OWNER)) {
                if (fastlock) {
                        synch->fastlock = fastlock;
-                       xnarch_atomic_set(fastlock, XN_NO_HANDLE);
+                       atomic_long_set(fastlock, XN_NO_HANDLE);
                } else
                        BUG();
        } else
@@ -431,7 +431,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
 {
        struct xnthread *thread = xnpod_current_thread(), *owner;
        xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
-       xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
+       atomic_long_t *lockp = xnsynch_fastlock(synch);
        spl_t s;
 
        XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
@@ -440,7 +440,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
 
       redo:
 
-       fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);
+       fastlock = atomic_long_cmpxchg(lockp, XN_NO_HANDLE, threadh);
 
        if (likely(fastlock == XN_NO_HANDLE)) {
                if (xnthread_test_state(thread, XNWEAK))
@@ -459,11 +459,11 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
           where possible. Only if it appears not to be set, start
           with cmpxchg directly. */
        if (xnsynch_fast_is_claimed(fastlock)) {
-               old = xnarch_atomic_get(lockp);
+               old = atomic_long_read(lockp);
                goto test_no_owner;
        }
        do {
-               old = xnarch_atomic_cmpxchg(lockp, fastlock,
+               old = atomic_long_cmpxchg(lockp, fastlock,
                                xnsynch_fast_set_claimed(fastlock, 1));
                if (likely(old == fastlock))
                        break;
@@ -552,7 +552,7 @@ xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
                if (xnsynch_pended_p(synch))
                        threadh =
                                xnsynch_fast_set_claimed(threadh, 1);
-               xnarch_atomic_set(lockp, threadh);
+               atomic_long_set(lockp, threadh);
        }
 
       unlock_and_exit:
@@ -728,7 +728,7 @@ struct xnthread *__xnsynch_transfer_ownership(struct xnsynch *synch,
 {
        struct xnthread *newowner;
        struct xnpholder *holder;
-       xnarch_atomic_t *lockp;
+       atomic_long_t *lockp;
        xnhandle_t newownerh;
        spl_t s;
 
@@ -738,7 +738,7 @@ struct xnthread *__xnsynch_transfer_ownership(struct xnsynch *synch,
 
        if (emptypq_p(&synch->pendq)) {
                synch->owner = NULL;
-               xnarch_atomic_set(lockp, XN_NO_HANDLE);
+               atomic_long_set(lockp, XN_NO_HANDLE);
                xnlock_put_irqrestore(&nklock, s);
                return NULL;
        }
@@ -756,7 +756,7 @@ struct xnthread *__xnsynch_transfer_ownership(struct xnsynch *synch,
 
        newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
                                             xnsynch_pended_p(synch));
-       xnarch_atomic_set(lockp, newownerh);
+       atomic_long_set(lockp, newownerh);
 
        xnlock_put_irqrestore(&nklock, s);
 
diff --git a/lib/cobalt/printf.c b/lib/cobalt/printf.c
index e2320b9..99e4b90 100644
--- a/lib/cobalt/printf.c
+++ b/lib/cobalt/printf.c
@@ -29,7 +29,7 @@
 
 #include <rtdk.h>
 #include <nucleus/types.h>     /* For BITS_PER_LONG */
-#include <asm/xenomai/atomic.h>        /* For atomic_cmpxchg */
+#include <asm/xenomai/atomic.h>        /* For atomic_long_cmpxchg */
 #include <asm-generic/stack.h>
 #include <asm-generic/current.h>
 #include "internal.h"
@@ -85,7 +85,7 @@ static pthread_mutex_t buffer_lock;
 static pthread_cond_t printer_wakeup;
 static pthread_key_t buffer_key;
 static pthread_t printer_thread;
-static xnarch_atomic_t *pool_bitmap;
+static atomic_long_t *pool_bitmap;
 static unsigned pool_bitmap_len;
 static unsigned pool_buf_size;
 static unsigned long pool_start, pool_len;
@@ -387,7 +387,7 @@ int rt_print_init(size_t buffer_size, const char *buffer_name)
                unsigned i;
 
                for (i = 0; i < pool_bitmap_len; i++) {
-                       old_bitmap = xnarch_atomic_get(&pool_bitmap[i]);
+                       old_bitmap = atomic_long_read(&pool_bitmap[i]);
                        if (old_bitmap)
                                goto acquire;
                }
@@ -398,9 +398,9 @@ int rt_print_init(size_t buffer_size, const char *buffer_name)
                do {
                        bitmap = old_bitmap;
                        j = __builtin_ffsl(bitmap) - 1;
-                       old_bitmap = xnarch_atomic_cmpxchg(&pool_bitmap[i],
-                                                          bitmap,
-                                                          bitmap & ~(1UL << j));
+                       old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i],
+                                                        bitmap,
+                                                        bitmap & ~(1UL << j));
                } while (old_bitmap != bitmap && old_bitmap);
                j += i * BITS_PER_LONG;
        } while (!old_bitmap);
@@ -509,12 +509,12 @@ static void cleanup_buffer(struct print_buffer *buffer)
                i = j / BITS_PER_LONG;
                j = j % BITS_PER_LONG;
 
-               old_bitmap = xnarch_atomic_get(&pool_bitmap[i]);
+               old_bitmap = atomic_long_read(&pool_bitmap[i]);
                do {
                        bitmap = old_bitmap;
-                       old_bitmap = xnarch_atomic_cmpxchg(&pool_bitmap[i],
-                                                          bitmap,
-                                                          bitmap | (1UL << j));
+                       old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i],
+                                                        bitmap,
+                                                        bitmap | (1UL << j));
                } while (old_bitmap != bitmap);
 
                return;
@@ -738,10 +738,10 @@ void cobalt_print_init(void)
                }
 
                for (i = 0; i < buffers_count / BITS_PER_LONG; i++)
-                       xnarch_atomic_set(&pool_bitmap[i], ~0UL);
+                       atomic_long_set(&pool_bitmap[i], ~0UL);
                if (buffers_count % BITS_PER_LONG)
-                       xnarch_atomic_set(&pool_bitmap[i],
-                                         (1UL << (buffers_count % BITS_PER_LONG)) - 1);
+                       atomic_long_set(&pool_bitmap[i],
+                                       (1UL << (buffers_count % BITS_PER_LONG)) - 1);
 
                for (i = 0; i < buffers_count; i++) {
                        struct print_buffer *buffer =


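For reference, a minimal user-space sketch of the renamed API (illustrative only, not part of the patch): it mirrors the generic atomic_long_t fallback introduced in include/asm-generic/atomic.h above and claims a fast-lock word with atomic_long_cmpxchg(), in the spirit of xnsynch_fast_acquire(). The demo_fast_acquire() helper, the NO_OWNER constant and main() are hypothetical names added for the demo; the __sync builtins and typeof are GCC extensions, as in the patch.

/*
 * Sketch only -- mirrors the !__KERNEL__ fallbacks from
 * include/asm-generic/atomic.h in this commit; demo_fast_acquire(),
 * NO_OWNER and main() are hypothetical.
 */
#include <stdio.h>

typedef struct {
	unsigned long v;
} atomic_long_t;

#define atomic_long_read(p)	((p)->v)
#define atomic_long_set(p, i)	((p)->v = (i))
#define atomic_long_cmpxchg(p, o, n)				\
	__sync_val_compare_and_swap(&(p)->v,			\
				    (typeof((p)->v))(o),	\
				    (typeof((p)->v))(n))

#define NO_OWNER 0UL	/* stand-in for XN_NO_HANDLE */

/* Claim the fast-lock word for 'self': 0 on success, -1 if already owned. */
static int demo_fast_acquire(atomic_long_t *fastlock, unsigned long self)
{
	unsigned long h = atomic_long_cmpxchg(fastlock, NO_OWNER, self);

	return h == NO_OWNER ? 0 : -1;
}

int main(void)
{
	atomic_long_t lock;

	atomic_long_set(&lock, NO_OWNER);
	printf("first acquire : %d\n", demo_fast_acquire(&lock, 42)); /* 0: lock grabbed */
	printf("second acquire: %d\n", demo_fast_acquire(&lock, 7));  /* -1: 42 still owns it */
	printf("owner handle  : %lu\n", atomic_long_read(&lock));     /* 42 */

	return 0;
}

The point of the rename is visible here: user-space callers now use the same atomic_long_*() spelling as the kernel side, with the __sync-based fallbacks filling in when no architecture-specific implementation is provided.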