Module: xenomai-forge
Branch: master
Commit: d9493f51ea13c662002c8068eccfdf7132533e30
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=d9493f51ea13c662002c8068eccfdf7132533e30

Author: Philippe Gerum <r...@xenomai.org>
Date:   Tue Jun 25 09:39:27 2013 +0200

cobalt/kernel: stop wrapping common memory barriers

As the simulator build was dropped long ago, there is no point in
wrapping membars anymore. Use generic kernel memory barriers when
applicable.

---

 include/cobalt/asm-generic/atomic.h |   36 ++++++++++++++++------------------
 include/cobalt/kernel/pod.h         |    7 +++--
 include/cobalt/kernel/seqlock.h     |    8 +++---
 include/cobalt/kernel/stat.h        |    2 +-
 kernel/cobalt/intr.c                |    6 ++--
 kernel/cobalt/pod.c                 |    5 +--
 lib/cobalt/printf.c                 |    8 +++---
 7 files changed, 35 insertions(+), 37 deletions(-)

diff --git a/include/cobalt/asm-generic/atomic.h b/include/cobalt/asm-generic/atomic.h
index aeeca83..7b44887 100644
--- a/include/cobalt/asm-generic/atomic.h
+++ b/include/cobalt/asm-generic/atomic.h
@@ -30,12 +30,10 @@ typedef unsigned long atomic_flags_t;
 #include <asm/atomic.h>
 #include <asm/xenomai/wrappers.h>
 
-#define xnarch_memory_barrier()                smp_mb()
-#define xnarch_read_memory_barrier()    rmb()
-#define xnarch_write_memory_barrier()   wmb()
-
-/* atomic_set_mask, atomic_clear_mask are not standard among linux
-   ports */
+/*
+ * atomic_set_mask, atomic_clear_mask may not be available from all
+ * linux ports.
+ */
 #ifndef xnarch_atomic_set_mask
 #define xnarch_atomic_set_mask(pflags,mask) atomic_set_mask((mask),(pflags))
 #endif
@@ -52,20 +50,8 @@ typedef struct {
        unsigned long v;
 } atomic_long_t;
 
-#ifndef xnarch_memory_barrier
-#define xnarch_memory_barrier() __sync_synchronize()
-#endif
-
-#ifndef xnarch_read_memory_barrier
-#define xnarch_read_memory_barrier() xnarch_memory_barrier()
-#endif
-
-#ifndef xnarch_write_memory_barrier
-#define xnarch_write_memory_barrier() xnarch_memory_barrier()
-#endif
-
 #ifndef cpu_relax
-#define cpu_relax() xnarch_memory_barrier()
+#define cpu_relax() __sync_synchronize()
 #endif
 
 #ifndef atomic_long_read
@@ -83,6 +69,18 @@ typedef struct {
                                    (typeof((p)->v))(n))
 #endif
 
+#ifndef smp_mb
+#define smp_mb() __sync_synchronize()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb() __sync_synchronize()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb() __sync_synchronize()
+#endif
+
 #endif /* !__KERNEL__ */
 
 #endif /* _COBALT_ASM_GENERIC_ATOMIC_H */
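
[Editor's note] Outside the kernel (!__KERNEL__), all three barriers now
fall back to GCC's __sync_synchronize() full-barrier builtin. As a minimal
sketch, not part of this commit, here is the store/load pairing these
fallbacks have to order; the producer/consumer names are illustrative only:

#include <pthread.h>
#include <stdio.h>

#define smp_wmb() __sync_synchronize()  /* userland fallback, as above */
#define smp_rmb() __sync_synchronize()

static int payload;
static volatile int ready;

static void *producer(void *arg)
{
        (void)arg;
        payload = 42;   /* write the data... */
        smp_wmb();      /* ...before publishing the flag */
        ready = 1;
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        while (!ready)
                ;       /* wait for the flag */
        smp_rmb();      /* order the flag read before the data read */
        printf("%d\n", payload);        /* must print 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}
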
diff --git a/include/cobalt/kernel/pod.h b/include/cobalt/kernel/pod.h
index 2840d2d..c01cd8e 100644
--- a/include/cobalt/kernel/pod.h
+++ b/include/cobalt/kernel/pod.h
@@ -232,10 +232,11 @@ static inline void xnpod_schedule(void)
         * be either valid, or unused.
         */
        sched = xnpod_current_sched();
+       smp_rmb();
        /*
-        * No immediate rescheduling is possible if an ISR or callout
-        * context is active, or if we are caught in the middle of a
-        * unlocked context switch.
+        * No immediate rescheduling is possible if an ISR context is
+        * active, or if we are caught in the middle of a unlocked
+        * context switch.
         */
 #if XENO_DEBUG(NUCLEUS)
        if (testbits(sched->status | sched->lflags,
diff --git a/include/cobalt/kernel/seqlock.h b/include/cobalt/kernel/seqlock.h
index 7732a8f..9abe1a3 100644
--- a/include/cobalt/kernel/seqlock.h
+++ b/include/cobalt/kernel/seqlock.h
@@ -19,7 +19,7 @@ static inline unsigned xnread_seqcount_begin(const xnseqcount_t *s)
 
 repeat:
        ret = s->sequence;
-       xnarch_read_memory_barrier();
+       smp_rmb();
        if (ret & 1) {
                cpu_relax();
                goto repeat;
@@ -32,7 +32,7 @@ repeat:
  */
 static inline int xnread_seqcount_retry(const xnseqcount_t *s, unsigned start)
 {
-       xnarch_read_memory_barrier();
+       smp_rmb();
 
        return s->sequence != start;
 }
@@ -45,12 +45,12 @@ static inline int xnread_seqcount_retry(const xnseqcount_t *s, unsigned start)
 static inline void xnwrite_seqcount_begin(xnseqcount_t *s)
 {
        s->sequence++;
-       xnarch_write_memory_barrier();
+       smp_wmb();
 }
 
 static inline void xnwrite_seqcount_end(xnseqcount_t *s)
 {
-       xnarch_write_memory_barrier();
+       smp_wmb();
        s->sequence++;
 }
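
[Editor's note] The seqcount helpers keep their retry semantics; only the
barrier spelling changes. A usage sketch, assuming a zero-initialized
xnseqcount_t (even sequence = stable) guarding a two-word timestamp, with
the writer serialized externally as this API requires:

static xnseqcount_t seq;
static struct { unsigned long sec, nsec; } stamp;

/* writer side, assumed to run under an external lock */
static void stamp_update(unsigned long sec, unsigned long nsec)
{
        xnwrite_seqcount_begin(&seq);   /* sequence goes odd: update in flight */
        stamp.sec = sec;
        stamp.nsec = nsec;
        xnwrite_seqcount_end(&seq);     /* sequence goes even: update published */
}

/* reader side, lockless: retry if we raced with a writer */
static void stamp_read(unsigned long *sec, unsigned long *nsec)
{
        unsigned start;

        do {
                start = xnread_seqcount_begin(&seq);
                *sec = stamp.sec;
                *nsec = stamp.nsec;
        } while (xnread_seqcount_retry(&seq, start));
}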
 
diff --git a/include/cobalt/kernel/stat.h b/include/cobalt/kernel/stat.h
index c9a182f..df9f983 100644
--- a/include/cobalt/kernel/stat.h
+++ b/include/cobalt/kernel/stat.h
@@ -46,7 +46,7 @@ do { \
        (sched)->last_account_switch = date; \
        /* All changes must be committed before changing the current_account \
           reference in sched (required for xnintr_sync_stat_references) */ \
-       xnarch_memory_barrier(); \
+       smp_wmb(); \
 } while (0)
 
 /* Update the current account reference, returning the previous one. */
diff --git a/kernel/cobalt/intr.c b/kernel/cobalt/intr.c
index b07a44f..fbc4478 100644
--- a/kernel/cobalt/intr.c
+++ b/kernel/cobalt/intr.c
@@ -52,14 +52,14 @@ static int xnintr_list_rev;  /* Modification counter of xnintr list */
 static inline void xnintr_stat_counter_inc(void)
 {
        xnintr_count++;
-       xnarch_memory_barrier();
+       smp_mb();
        xnintr_list_rev++;
 }
 
 static inline void xnintr_stat_counter_dec(void)
 {
        xnintr_count--;
-       xnarch_memory_barrier();
+       smp_mb();
        xnintr_list_rev++;
 }
 
@@ -911,7 +911,7 @@ int xnintr_query_init(xnintr_iterator_t *iterator)
         * xnintr_query() will trigger an appropriate error below.
         */
        iterator->list_rev = xnintr_list_rev;
-       xnarch_memory_barrier();
+       smp_mb();
 
        return xnintr_count;
 }
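
[Editor's note] The full barrier here orders the count update against the
revision bump, so a reader can sample xnintr_list_rev around its walk of
the list and restart on a concurrent attach/detach. A sketch of that
reader side (snapshot_irq_count() is a hypothetical helper, not in the
tree):

static int snapshot_irq_count(void)
{
        int rev, count;

        do {
                rev = xnintr_list_rev;
                smp_mb();       /* pairs with the barriers in the counter helpers */
                count = xnintr_count;
                smp_mb();       /* re-read the revision only after the count */
        } while (rev != xnintr_list_rev);       /* restart if the list changed */

        return count;
}
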
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index caff4d7..0ed9986 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -212,7 +212,6 @@ EXPORT_SYMBOL_GPL(xnpod_fatal);
 void __xnpod_schedule_handler(void) /* hw interrupts off. */
 {
        trace_mark(xn_nucleus, sched_remote, MARK_NOARGS);
-       xnarch_memory_barrier();
        xnpod_schedule();
 }
 
@@ -292,7 +291,7 @@ int xnpod_init(void)
        xnregistry_init();
 
        __setbits(pod->status, XNPEXEC);
-       xnarch_memory_barrier();
+       smp_wmb();
        xnshadow_grab_events();
 
        ret = xnpod_enable_timesource();
@@ -1700,7 +1699,7 @@ static inline int test_resched(struct xnsched *sched)
 #ifdef CONFIG_SMP
        /* Send resched IPI to remote CPU(s). */
        if (unlikely(!cpus_empty(sched->resched))) {
-               xnarch_memory_barrier();
+               smp_mb();
                ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, sched->resched);
                cpus_clear(sched->resched);
        }
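
[Editor's note] On the sending side the barrier stays: the resched request
must be globally visible before the IPI fires, or the remote CPU could
take the interrupt and find nothing to do. The matching barrier in
__xnpod_schedule_handler() goes away, the IPI delivery itself being
expected to provide the ordering on the receiving end. A generic
publish-then-kick sketch, all names hypothetical:

static volatile unsigned long resched_pending;  /* stand-in for sched->resched */

static void kick_remote(void (*send_ipi)(void))
{
        resched_pending = 1;    /* publish the request... */
        __sync_synchronize();   /* smp_mb(): make it visible... */
        send_ipi();             /* ...before the remote CPU is interrupted */
}
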
diff --git a/lib/cobalt/printf.c b/lib/cobalt/printf.c
index a6c4ea6..850817d 100644
--- a/lib/cobalt/printf.c
+++ b/lib/cobalt/printf.c
@@ -122,7 +122,7 @@ vprint_to_buffer(FILE *stream, int fortify_level, int priority,
        /* Take a snapshot of the ring buffer state */
        write_pos = buffer->write_pos;
        read_pos = buffer->read_pos;
-       xnarch_read_memory_barrier();
+       smp_mb();
 
        /* Is our write limit the end of the ring buffer? */
        if (write_pos >= read_pos) {
@@ -231,7 +231,7 @@ vprint_to_buffer(FILE *stream, int fortify_level, int priority,
        }
 
        /* All entry data must be written before we can update write_pos */
-       xnarch_write_memory_barrier();
+       smp_wmb();
 
        buffer->write_pos = write_pos;
 
@@ -640,11 +640,11 @@ static void print_buffers(void)
 
                /* Make sure we have read the entry competely before
                   forwarding read_pos */
-               xnarch_read_memory_barrier();
+               smp_rmb();
                buffer->read_pos = read_pos;
 
                /* Enforce the read_pos update before proceeding */
-               xnarch_write_memory_barrier();
+               smp_wmb();
        }
 }
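
[Editor's note] The printf ring is a lockless single-producer/
single-consumer queue, and the barriers pair across its two ends: the
producer orders the entry data before publishing write_pos, the consumer
orders its reads before releasing the slot through read_pos. The same
pairing, reduced to a byte queue; the position names mirror the real
fields, but the queue itself is a sketch, and the userland fallbacks make
every barrier a full one:

#define smp_rmb() __sync_synchronize()
#define smp_wmb() __sync_synchronize()

#define RING_SIZE 256   /* power of two, so the index math wraps cleanly */

static char ring[RING_SIZE];
static volatile unsigned int write_pos, read_pos;

/* producer side (RT thread emitting a record) */
static int ring_put(char c)
{
        unsigned int w = write_pos;

        if (w - read_pos == RING_SIZE)
                return -1;              /* full */
        ring[w % RING_SIZE] = c;        /* write the entry data... */
        smp_wmb();                      /* ...before publishing it */
        write_pos = w + 1;
        return 0;
}

/* consumer side (plain thread draining the buffer) */
static int ring_get(char *c)
{
        unsigned int r = read_pos;

        if (r == write_pos)
                return -1;              /* empty */
        smp_rmb();                      /* see the entry the producer published */
        *c = ring[r % RING_SIZE];
        smp_rmb();                      /* finish the read before forwarding read_pos */
        read_pos = r + 1;
        smp_wmb();                      /* enforce the read_pos update before proceeding */
        return 0;
}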
 

