Module: xenomai-forge
Branch: next
Commit: 2f6ba5046143ce557a8a99888cde26ce352b45a5
URL:    http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=2f6ba5046143ce557a8a99888cde26ce352b45a5

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Jun 28 19:55:55 2013 +0200

cobalt/kernel: drop [__]testbits

These macros bring no value. Prefer explicit, open-coded bitwise tests.
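
For reference, testbits() was a thin wrapper around the & operator (it expanded
to ((flags) & (mask)), see the include/cobalt/kernel/types.h hunk below), so
each call site converts mechanically. A minimal sketch of the pattern, using
call sites taken from the hunks that follow:

    /* Plain presence test: testbits(flags, mask) becomes a bare mask. */
    if (sched->lflags & XNHTICK)
            xnintr_host_tick(sched);

    /* Negated test: !testbits(flags, mask) becomes an explicit == 0 check. */
    if ((timer->status & XNTIMER_DEQUEUED) == 0)
            __xntimer_stop(timer);

    /*
     * testbits() returned the masked bits rather than a boolean, so the
     * few sites comparing its result against a specific flag must keep
     * the whole masked expression parenthesized (== and != bind tighter
     * than & in C), as in the xnpod_schedule() hunk:
     */
    if (((sched->status | sched->lflags) &
         (XNINIRQ | XNINSW | XNRESCHED | XNINLOCK)) != XNRESCHED)
            return;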

---

 include/cobalt/kernel/pod.h          |    8 ++--
 include/cobalt/kernel/sched.h        |    4 +-
 include/cobalt/kernel/synch.h        |    3 +-
 include/cobalt/kernel/thread.h       |   16 ++++++-
 include/cobalt/kernel/timer.h        |    8 ++--
 include/cobalt/kernel/types.h        |    2 -
 kernel/cobalt/clock.c                |    4 +-
 kernel/cobalt/intr.c                 |    6 +-
 kernel/cobalt/pipe.c                 |   70 ++++++++++++++++-----------------
 kernel/cobalt/pod.c                  |   14 +++---
 kernel/cobalt/rtdm/drvlib.c          |   19 +++------
 kernel/cobalt/synch.c                |   30 +++++++-------
 kernel/cobalt/timer.c                |   25 ++++++------
 kernel/drivers/serial/16550A.c       |   64 ++++++++++++++----------------
 kernel/drivers/serial/mpc52xx_uart.c |   67 +++++++++++++++-----------------
 kernel/drivers/serial/rt_imx_uart.c  |   60 +++++++++++++---------------
 16 files changed, 194 insertions(+), 206 deletions(-)

diff --git a/include/cobalt/kernel/pod.h b/include/cobalt/kernel/pod.h
index 3a4a5d8..cd367bb 100644
--- a/include/cobalt/kernel/pod.h
+++ b/include/cobalt/kernel/pod.h
@@ -237,12 +237,12 @@ static inline void xnpod_schedule(void)
         * context switch.
         */
 #if XENO_DEBUG(NUCLEUS)
-       if (testbits(sched->status | sched->lflags,
-                    XNINIRQ|XNINSW|XNINLOCK))
+       if ((sched->status|sched->lflags) &
+           (XNINIRQ|XNINSW|XNINLOCK))
                return;
 #else /* !XENO_DEBUG(NUCLEUS) */
-       if (testbits(sched->status | sched->lflags,
-                    XNINIRQ|XNINSW|XNRESCHED|XNINLOCK) != XNRESCHED)
+       if (((sched->status|sched->lflags) &
+            (XNINIRQ|XNINSW|XNRESCHED|XNINLOCK)) != XNRESCHED)
                return;
 #endif /* !XENO_DEBUG(NUCLEUS) */
 
diff --git a/include/cobalt/kernel/sched.h b/include/cobalt/kernel/sched.h
index 3e7112c..db0431c 100644
--- a/include/cobalt/kernel/sched.h
+++ b/include/cobalt/kernel/sched.h
@@ -142,7 +142,7 @@ struct xnsched_class {
 /* Test resched flag of given sched. */
 static inline int xnsched_resched_p(struct xnsched *sched)
 {
-       return testbits(sched->status, XNRESCHED);
+       return sched->status & XNRESCHED;
 }
 
 /* Set self resched flag for the given scheduler. */
@@ -186,7 +186,7 @@ struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);
 static inline
 int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
 {
-       return testbits(sched->status, XNRESCHED);
+       return sched->status & XNRESCHED;
 }
 
 #else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
diff --git a/include/cobalt/kernel/synch.h b/include/cobalt/kernel/synch.h
index f270f20..1230d49 100644
--- a/include/cobalt/kernel/synch.h
+++ b/include/cobalt/kernel/synch.h
@@ -62,8 +62,7 @@ typedef struct xnsynch {
        void (*cleanup)(struct xnsynch *synch); /* Cleanup handler */
 } xnsynch_t;
 
-#define xnsynch_test_flags(synch,flags)        testbits((synch)->status,flags)
-#define xnsynch_set_flags(synch,flags) setbits((synch)->status,flags)
+#define xnsynch_set_flags(synch,flags)         setbits((synch)->status,flags)
 #define xnsynch_clear_flags(synch,flags)       clrbits((synch)->status,flags)
 
 #define xnsynch_for_each_sleeper(__pos, __synch)               \
diff --git a/include/cobalt/kernel/thread.h b/include/cobalt/kernel/thread.h
index cd2157e..287b779 100644
--- a/include/cobalt/kernel/thread.h
+++ b/include/cobalt/kernel/thread.h
@@ -182,10 +182,20 @@ typedef struct xnthread {
 #define xnthread_sched(thread)             ((thread)->sched)
 #define xnthread_start_time(thread)        ((thread)->stime)
 #define xnthread_state_flags(thread)       ((thread)->state)
-#define xnthread_test_state(thread,flags)  testbits((thread)->state,flags)
+
+static inline int xnthread_test_state(struct xnthread *thread, int bits)
+{
+       return thread->state & bits;
+}
+
 #define xnthread_set_state(thread,flags)   __setbits((thread)->state,flags)
 #define xnthread_clear_state(thread,flags) __clrbits((thread)->state,flags)
-#define xnthread_test_info(thread,flags)   testbits((thread)->info,flags)
+
+static inline int xnthread_test_info(struct xnthread *thread, int bits)
+{
+       return thread->info & bits;
+}
+
 #define xnthread_set_info(thread,flags)    __setbits((thread)->info,flags)
 #define xnthread_clear_info(thread,flags)  __clrbits((thread)->info,flags)
 #define xnthread_lock_count(thread)        ((thread)->schedlck)
@@ -277,7 +287,7 @@ xnsynch_release(struct xnsynch *synch, struct xnthread *thread)
        atomic_long_t *lockp;
        xnhandle_t threadh;
 
-       XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
+       XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
 
        trace_mark(xn_nucleus, synch_release, "synch %p", synch);
 
diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h
index 294e675..99d78c8 100644
--- a/include/cobalt/kernel/timer.h
+++ b/include/cobalt/kernel/timer.h
@@ -218,13 +218,13 @@ static inline int xntimer_active_p (xntimer_t *timer)
 
 static inline int xntimer_running_p(xntimer_t *timer)
 {
-       return !testbits(timer->status,XNTIMER_DEQUEUED);
+       return (timer->status & XNTIMER_DEQUEUED) == 0;
 }
 
 static inline int xntimer_reload_p(xntimer_t *timer)
 {
-       return testbits(timer->status,
-                       XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_KILLED) ==
+       return (timer->status &
+               (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_KILLED)) ==
                (XNTIMER_PERIODIC|XNTIMER_DEQUEUED);
 }
 
@@ -277,7 +277,7 @@ xnticks_t xntimer_get_interval(xntimer_t *timer);
 
 static inline void xntimer_stop(xntimer_t *timer)
 {
-       if (!testbits(timer->status,XNTIMER_DEQUEUED))
+       if ((timer->status & XNTIMER_DEQUEUED) == 0)
                __xntimer_stop(timer);
 }
 
diff --git a/include/cobalt/kernel/types.h b/include/cobalt/kernel/types.h
index 6ea8d89..5cf3f9a 100644
--- a/include/cobalt/kernel/types.h
+++ b/include/cobalt/kernel/types.h
@@ -22,10 +22,8 @@
 
 #include <cobalt/uapi/sys/types.h>
 
-#define testbits(flags,mask) ((flags) & (mask))
 #define setbits(flags,mask)  xnarch_atomic_set_mask(&(flags),mask)
 #define clrbits(flags,mask)  xnarch_atomic_clear_mask(&(flags),mask)
-#define __testbits(flags,mask) testbits(flags,mask)
 #define __setbits(flags,mask)  do { (flags) |= (mask); } while(0)
 #define __clrbits(flags,mask)  do { (flags) &= ~(mask); } while(0)
 
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index 5189eaa..f901e80 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -160,10 +160,10 @@ static int tmstat_vfile_show(struct xnvfile_snapshot_iterator *it, void *data)
                               "CPU", "SCHEDULED", "FIRED", "TIMEOUT",
                               "INTERVAL", "HANDLER", "NAME");
        else {
-               if (!testbits(p->status, XNTIMER_DEQUEUED))
+               if ((p->status & XNTIMER_DEQUEUED) == 0)
                        snprintf(timeout_buf, sizeof(timeout_buf), "%-10llu",
                                 p->timeout);
-               if (testbits(p->status, XNTIMER_PERIODIC))
+               if (p->status & XNTIMER_PERIODIC)
                        snprintf(interval_buf, sizeof(interval_buf), "%-10llu",
                                 p->interval);
                xnvfile_printf(it,
diff --git a/kernel/cobalt/intr.c b/kernel/cobalt/intr.c
index ada9455..9bc5ab1 100644
--- a/kernel/cobalt/intr.c
+++ b/kernel/cobalt/intr.c
@@ -126,7 +126,7 @@ void xnintr_clock_handler(void)
         * we only need to propagate the host tick in case the
         * interrupt preempted the root thread.
         */
-       if (testbits(sched->lflags, XNHTICK) &&
+       if ((sched->lflags & XNHTICK) &&
            xnthread_test_state(sched->curr, XNROOT))
                xnintr_host_tick(sched);
 
@@ -723,7 +723,7 @@ int xnintr_attach(xnintr_t *intr, void *cookie)
 
        xnlock_get_irqsave(&intrlock, s);
 
-       if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
+       if (intr->flags & XN_ISR_ATTACHED) {
                ret = -EBUSY;
                goto out;
        }
@@ -781,7 +781,7 @@ int xnintr_detach(xnintr_t *intr)
 
        xnlock_get_irqsave(&intrlock, s);
 
-       if (!__testbits(intr->flags, XN_ISR_ATTACHED)) {
+       if ((intr->flags & XN_ISR_ATTACHED) == 0) {
                ret = -EINVAL;
                goto out;
        }
diff --git a/kernel/cobalt/pipe.c b/kernel/cobalt/pipe.c
index dc18f04..09489c5 100644
--- a/kernel/cobalt/pipe.c
+++ b/kernel/cobalt/pipe.c
@@ -65,8 +65,8 @@ static inline int xnpipe_minor_alloc(int minor)
                minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);
 
        if (minor == XNPIPE_NDEVS ||
-           testbits(xnpipe_bitmap[minor / BITS_PER_LONG],
-                    1UL << (minor % BITS_PER_LONG)))
+           (xnpipe_bitmap[minor / BITS_PER_LONG] &
+            (1UL << (minor % BITS_PER_LONG))))
                minor = -EBUSY;
        else
                __setbits(xnpipe_bitmap[minor / BITS_PER_LONG],
@@ -93,7 +93,7 @@ static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
 
 static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
 {
-       if (testbits(state->status, mask))
+       if (state->status & mask)
                if (--state->wcount == 0) {
                        list_del(&state->slink);
                        __clrbits(state->status, mask);
@@ -102,7 +102,7 @@ static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
 
 static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
 {
-       if (testbits(state->status, mask)) {
+       if (state->status & mask) {
                if (state->wcount) {
                        state->wcount = 0;
                        list_del(&state->slink);
@@ -163,7 +163,7 @@ static void xnpipe_wakeup_proc(void *cookie)
                state = list_first_entry(&xnpipe_sleepq, struct xnpipe_state, slink);
 
                for (;;) {
-                       rbits = testbits(state->status, XNPIPE_USER_ALL_READY);
+                       rbits = state->status & XNPIPE_USER_ALL_READY;
                        if (rbits)
                                break;
                        if (list_is_last(&state->slink, &xnpipe_sleepq))
@@ -201,7 +201,7 @@ check_async:
                state = list_first_entry(&xnpipe_asyncq, struct xnpipe_state, alink);
 
                for (;;) {
-                       if (testbits(state->status, XNPIPE_USER_SIGIO))
+                       if (state->status & XNPIPE_USER_SIGIO)
                                break;
                        if (list_is_last(&state->alink, &xnpipe_asyncq))
                                goto out;
@@ -335,8 +335,8 @@ int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
        state->xstate = xstate;
        state->ionrd = 0;
 
-       if (testbits(state->status, XNPIPE_USER_CONN)) {
-               if (testbits(state->status, XNPIPE_USER_WREAD)) {
+       if (state->status & XNPIPE_USER_CONN) {
+               if (state->status & XNPIPE_USER_WREAD) {
                        /*
                         * Wake up the regular Linux task waiting for
                         * the kernel side to connect (xnpipe_open).
@@ -373,7 +373,7 @@ int xnpipe_disconnect(int minor)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }
@@ -382,7 +382,7 @@ int xnpipe_disconnect(int minor)
 
        state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
 
-       if (!testbits(state->status, XNPIPE_USER_CONN))
+       if ((state->status & XNPIPE_USER_CONN) == 0)
                goto cleanup;
 
        xnpipe_flushq(state, inq, free_ibuf, s);
@@ -390,7 +390,7 @@ int xnpipe_disconnect(int minor)
        if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
                xnpod_schedule();
 
-       if (testbits(state->status, XNPIPE_USER_WREAD)) {
+       if (state->status & XNPIPE_USER_WREAD) {
                /*
                 * Wake up the regular Linux task waiting for some
                 * operation from the Xenomai side (read/write or
@@ -411,7 +411,7 @@ cleanup:
         * close. This will prevent the extra state from being wiped
         * out until then.
         */
-       if (testbits(state->status, XNPIPE_USER_CONN))
+       if (state->status & XNPIPE_USER_CONN)
                __setbits(state->status, XNPIPE_KERN_LCLOSE);
        else {
                xnlock_put_irqrestore(&nklock, s);
@@ -445,7 +445,7 @@ ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }
@@ -461,12 +461,12 @@ ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
 
        state->nroutq++;
 
-       if (!testbits(state->status, XNPIPE_USER_CONN)) {
+       if ((state->status & XNPIPE_USER_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return (ssize_t) size;
        }
 
-       if (testbits(state->status, XNPIPE_USER_WREAD)) {
+       if (state->status & XNPIPE_USER_WREAD) {
                /*
                 * Wake up the regular Linux task waiting for input
                 * from the Xenomai side.
@@ -504,7 +504,7 @@ ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }
@@ -537,7 +537,7 @@ ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                ret = -EBADF;
                goto unlock_and_exit;
        }
@@ -581,7 +581,7 @@ ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
        state->nrinq--;
        ret = (ssize_t)xnpipe_m_size(mh);
 
-       if (testbits(state->status, XNPIPE_USER_WSYNC)) {
+       if (state->status & XNPIPE_USER_WSYNC) {
                __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                xnpipe_schedule_request();
        }
@@ -607,7 +607,7 @@ int xnpipe_flush(int minor, int mode)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBADF;
        }
@@ -620,7 +620,7 @@ int xnpipe_flush(int minor, int mode)
        if (mode & XNPIPE_IFLUSH)
                xnpipe_flushq(state, inq, free_ibuf, s);
 
-       if (testbits(state->status, XNPIPE_USER_WSYNC) &&
+       if ((state->status & XNPIPE_USER_WSYNC) &&
            msgcount > state->nroutq + state->nrinq) {
                __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                xnpipe_schedule_request();
@@ -638,7 +638,7 @@ EXPORT_SYMBOL_GPL(xnpipe_flush);
                xnpipe_flushq((__state), outq, free_obuf, (__s));       \
                xnpipe_flushq((__state), inq, free_ibuf, (__s));        \
                __clrbits((__state)->status, XNPIPE_USER_CONN);         \
-               if (testbits((__state)->status, XNPIPE_KERN_LCLOSE)) {  \
+               if ((__state)->status & XNPIPE_KERN_LCLOSE) {           \
                        clrbits((__state)->status, XNPIPE_KERN_LCLOSE); \
                        xnlock_put_irqrestore(&nklock, (__s));          \
                        (__state)->ops.release((__state)->xstate);      \
@@ -667,7 +667,7 @@ static int xnpipe_open(struct inode *inode, struct file *file)
        xnlock_get_irqsave(&nklock, s);
 
        /* Enforce exclusive open for the message queues. */
-       if (testbits(state->status, XNPIPE_USER_CONN)) {
+       if (state->status & XNPIPE_USER_CONN) {
                xnlock_put_irqrestore(&nklock, s);
                return -EBUSY;
        }
@@ -683,16 +683,15 @@ static int xnpipe_open(struct inode *inode, struct file *file)
                  XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
                  XNPIPE_USER_SIGIO);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
-               if (testbits(file->f_flags, O_NONBLOCK)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
+               if (file->f_flags & O_NONBLOCK) {
                        xnpipe_cleanup_user_conn(state, s);
                        xnlock_put_irqrestore(&nklock, s);
                        return -EWOULDBLOCK;
                }
 
                sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
-                                        testbits(state->status,
-                                                 XNPIPE_KERN_CONN));
+                                        state->status & XNPIPE_KERN_CONN);
                if (sigpending) {
                        xnpipe_cleanup_user_conn(state, s);
                        xnlock_put_irqrestore(&nklock, s);
@@ -718,7 +717,7 @@ static int xnpipe_release(struct inode *inode, struct file *file)
        xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
        xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);
 
-       if (testbits(state->status, XNPIPE_KERN_CONN)) {
+       if (state->status & XNPIPE_KERN_CONN) {
                /* Unblock waiters. */
                if (xnsynch_pended_p(&state->synchbase)) {
                        xnsynch_flush(&state->synchbase, XNRMID);
@@ -764,7 +763,7 @@ static ssize_t xnpipe_read(struct file *file,
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return -EPIPE;
        }
@@ -845,7 +844,7 @@ static ssize_t xnpipe_read(struct file *file,
                xnlock_put_irqrestore(&nklock, s);
                state->ops.free_obuf(mh, state->xstate);
                xnlock_get_irqsave(&nklock, s);
-               if (testbits(state->status, XNPIPE_USER_WSYNC)) {
+               if (state->status & XNPIPE_USER_WSYNC) {
                        __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                        xnpipe_schedule_request();
                }
@@ -873,7 +872,7 @@ static ssize_t xnpipe_write(struct file *file,
        xnlock_get_irqsave(&nklock, s);
 
 retry:
-       if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+       if ((state->status & XNPIPE_KERN_CONN) == 0) {
                xnlock_put_irqrestore(&nklock, s);
                return -EPIPE;
        }
@@ -953,7 +952,7 @@ static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                xnlock_get_irqsave(&nklock, s);
 
-               if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+               if ((state->status & XNPIPE_KERN_CONN) == 0) {
                        xnlock_put_irqrestore(&nklock, s);
                        return -EPIPE;
                }
@@ -966,7 +965,7 @@ static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                xnlock_get_irqsave(&nklock, s);
 
-               if (!testbits(state->status, XNPIPE_KERN_CONN)) {
+               if ((state->status & XNPIPE_KERN_CONN) == 0) {
                        xnlock_put_irqrestore(&nklock, s);
                        return -EPIPE;
                }
@@ -975,7 +974,7 @@ static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        kick_wsync:
 
-               if (n > 0 && testbits(state->status, XNPIPE_USER_WSYNC)) {
+               if (n > 0 && (state->status & XNPIPE_USER_WSYNC)) {
                        __setbits(state->status, XNPIPE_USER_WSYNC_READY);
                        xnpipe_schedule_request();
                }
@@ -994,8 +993,7 @@ static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case FIONREAD:
 
-               n = testbits(state->status,
-                            XNPIPE_KERN_CONN) ? state->ionrd : 0;
+               n = (state->status & XNPIPE_KERN_CONN) ? state->ionrd : 0;
 
                if (put_user(n, (int *)arg))
                        return -EFAULT;
@@ -1048,7 +1046,7 @@ static unsigned xnpipe_poll(struct file *file, poll_table *pt)
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (testbits(state->status, XNPIPE_KERN_CONN))
+       if (state->status & XNPIPE_KERN_CONN)
                w_mask |= (POLLOUT | POLLWRNORM);
        else
                r_mask |= POLLHUP;
diff --git a/kernel/cobalt/pod.c b/kernel/cobalt/pod.c
index 23b6d1a..4a36a87 100644
--- a/kernel/cobalt/pod.c
+++ b/kernel/cobalt/pod.c
@@ -749,7 +749,7 @@ static inline int moving_target(struct xnsched *sched, struct xnthread *thread)
         * CPU, do nothing, this case will be caught in
         * xnsched_finish_unlocked_switch.
         */
-       ret = testbits(sched->status, XNINSW) ||
+       ret = (sched->status & XNINSW) ||
                xnthread_test_state(thread, XNMIGRATE);
 #endif
        return ret;
@@ -1520,7 +1520,7 @@ int xnpod_set_thread_schedparam(struct xnthread *thread,
         * currently does.
         */
        if (old_wprio != new_wprio && thread->wchan != NULL &&
-           !testbits(thread->wchan->status, XNSYNCH_DREORD))
+           (thread->wchan->status & XNSYNCH_DREORD) == 0)
                /*
                 * Update the pending order of the thread inside its
                 * wait queue, unless this behaviour has been
@@ -1701,7 +1701,7 @@ static inline void xnpod_switch_to(xnsched_t *sched,
 
 static inline int test_resched(struct xnsched *sched)
 {
-       int resched = testbits(sched->status, XNRESCHED);
+       int resched = sched->status & XNRESCHED;
 #ifdef CONFIG_SMP
        /* Send resched IPI to remote CPU(s). */
        if (unlikely(!cpus_empty(sched->resched))) {
@@ -1772,9 +1772,9 @@ reschedule:
        next = xnsched_pick_next(sched);
        if (next == curr) {
                if (unlikely(xnthread_test_state(next, XNROOT))) {
-                       if (testbits(sched->lflags, XNHTICK))
+                       if (sched->lflags & XNHTICK)
                                xnintr_host_tick(sched);
-                       if (testbits(sched->lflags, XNHDEFER))
+                       if (sched->lflags & XNHDEFER)
                                xntimer_next_local_shot(sched);
                }
                goto signal_unlock_and_exit;
@@ -1805,9 +1805,9 @@ reschedule:
                leave_root(prev);
                shadow = 0;
        } else if (xnthread_test_state(next, XNROOT)) {
-               if (testbits(sched->lflags, XNHTICK))
+               if (sched->lflags & XNHTICK)
                        xnintr_host_tick(sched);
-               if (testbits(sched->lflags, XNHDEFER))
+               if (sched->lflags & XNHDEFER)
                        xntimer_next_local_shot(sched);
                enter_root(next);
        }
diff --git a/kernel/cobalt/rtdm/drvlib.c b/kernel/cobalt/rtdm/drvlib.c
index 2b7f5d3..3e7fb0d 100644
--- a/kernel/cobalt/rtdm/drvlib.c
+++ b/kernel/cobalt/rtdm/drvlib.c
@@ -930,11 +930,9 @@ int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (unlikely(xnsynch_test_flags(&event->synch_base,
-                                       RTDM_SYNCH_DELETED)))
+       if (unlikely(event->synch_base.status & RTDM_SYNCH_DELETED))
                err = -EIDRM;
-       else if (likely(xnsynch_test_flags(&event->synch_base,
-                                          RTDM_EVENT_PENDING))) {
+       else if (likely(event->synch_base.status & RTDM_EVENT_PENDING)) {
                xnsynch_clear_flags(&event->synch_base, RTDM_EVENT_PENDING);
                xnselect_signal(&event->select_block, 0);
        } else {
@@ -1052,9 +1050,8 @@ int rtdm_event_select_bind(rtdm_event_t *event, rtdm_selector_t *selector,
        xnlock_get_irqsave(&nklock, s);
        err = xnselect_bind(&event->select_block,
                            binding, selector, type, fd_index,
-                           xnsynch_test_flags(&event->synch_base,
-                                              RTDM_SYNCH_DELETED |
-                                              RTDM_EVENT_PENDING));
+                           event->synch_base.status & (RTDM_SYNCH_DELETED |
+                                                      RTDM_EVENT_PENDING));
        xnlock_put_irqrestore(&nklock, s);
 
        if (err)
@@ -1211,7 +1208,7 @@ int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (unlikely(xnsynch_test_flags(&sem->synch_base, RTDM_SYNCH_DELETED)))
+       if (unlikely(sem->synch_base.status & RTDM_SYNCH_DELETED))
                err = -EIDRM;
        else if (sem->value > 0) {
                if(!--sem->value)
@@ -1330,8 +1327,7 @@ int rtdm_sem_select_bind(rtdm_sem_t *sem, rtdm_selector_t *selector,
        err = xnselect_bind(&sem->select_block, binding, selector,
                            type, fd_index,
                            (sem->value > 0) ||
-                           xnsynch_test_flags(&sem->synch_base,
-                                              RTDM_SYNCH_DELETED));
+                           sem->synch_base.status & RTDM_SYNCH_DELETED);
        xnlock_put_irqrestore(&nklock, s);
 
        if (err)
@@ -1500,8 +1496,7 @@ int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
 
        xnlock_get_irqsave(&nklock, s);
 
-       if (unlikely(xnsynch_test_flags(&mutex->synch_base,
-                                       RTDM_SYNCH_DELETED)))
+       if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED))
                err = -EIDRM;
        else if (likely(xnsynch_owner(&mutex->synch_base) == NULL))
                xnsynch_set_owner(&mutex->synch_base, curr_thread);
diff --git a/kernel/cobalt/synch.c b/kernel/cobalt/synch.c
index dfd8300..f958bff 100644
--- a/kernel/cobalt/synch.c
+++ b/kernel/cobalt/synch.c
@@ -166,7 +166,7 @@ int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
        struct xnthread *thread = xnpod_current_thread();
        spl_t s;
 
-       XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
+       XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_OWNER);
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -174,7 +174,7 @@ int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
                   "thread %p thread_name %s synch %p",
                   thread, xnthread_name(thread), synch);
 
-       if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
+       if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
                list_add_tail(&thread->plink, &synch->pendq);
        else /* i.e. priority-sorted */
                list_add_priff(thread, &synch->pendq, wprio, plink);
@@ -222,7 +222,7 @@ struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
        struct xnthread *thread;
        spl_t s;
 
-       XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
+       XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_OWNER);
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -251,7 +251,7 @@ int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
        int nwakeups = 0;
        spl_t s;
 
-       XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
+       XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_OWNER);
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -310,7 +310,7 @@ void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper
 {
        spl_t s;
 
-       XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));
+       XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_OWNER);
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -401,7 +401,7 @@ int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
        atomic_long_t *lockp = xnsynch_fastlock(synch);
        spl_t s;
 
-       XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));
+       XENO_BUGON(NUCLEUS, (synch->status & XNSYNCH_OWNER) == 0);
 
        trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);
 redo:
@@ -456,7 +456,7 @@ redo:
 
        xnsynch_detect_relaxed_owner(synch, thread);
 
-       if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
+       if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
                list_add_tail(&thread->plink, &synch->pendq);
        else if (thread->wprio > owner->wprio) {
                if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
@@ -469,13 +469,13 @@ redo:
 
                list_add_priff(thread, &synch->pendq, wprio, plink);
 
-               if (testbits(synch->status, XNSYNCH_PIP)) {
+               if (synch->status & XNSYNCH_PIP) {
                        if (!xnthread_test_state(owner, XNBOOST)) {
                                owner->bprio = owner->cprio;
                                xnthread_set_state(owner, XNBOOST);
                        }
 
-                       if (testbits(synch->status, XNSYNCH_CLAIMED))
+                       if (synch->status & XNSYNCH_CLAIMED)
                                list_del(&synch->link);
                        else
                                __setbits(synch->status, XNSYNCH_CLAIMED);
@@ -634,7 +634,7 @@ void xnsynch_requeue_sleeper(struct xnthread *thread)
        struct xnsynch *synch = thread->wchan;
        struct xnthread *owner;
 
-       if (!testbits(synch->status, XNSYNCH_PRIO))
+       if ((synch->status & XNSYNCH_PRIO) == 0)
                return;
 
        list_del(&thread->plink);
@@ -650,7 +650,7 @@ void xnsynch_requeue_sleeper(struct xnthread *thread)
         * resource: we need to update the PI state.
         */
        synch->wprio = thread->wprio;
-       if (testbits(synch->status, XNSYNCH_CLAIMED)) {
+       if (synch->status & XNSYNCH_CLAIMED) {
                /*
                 * The resource is already claimed, just reorder the
                 * claim queue.
@@ -714,7 +714,7 @@ struct xnthread *__xnsynch_transfer_ownership(struct xnsynch *synch,
        xnthread_set_info(nextowner, XNWAKEN);
        xnpod_resume_thread(nextowner, XNPEND);
 
-       if (testbits(synch->status, XNSYNCH_CLAIMED))
+       if (synch->status & XNSYNCH_CLAIMED)
                xnsynch_clear_boost(synch, lastowner);
 
        nextownerh = xnsynch_fast_set_claimed(xnthread_handle(nextowner),
@@ -830,7 +830,7 @@ int xnsynch_flush(struct xnsynch *synch, int reason)
                   synch, reason);
 
        if (list_empty(&synch->pendq)) {
-               XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_CLAIMED));
+               XENO_BUGON(NUCLEUS, synch->status & XNSYNCH_CLAIMED);
                ret = XNSYNCH_DONE;
        } else {
                ret = XNSYNCH_RESCHED;
@@ -840,7 +840,7 @@ int xnsynch_flush(struct xnsynch *synch, int reason)
                        sleeper->wchan = NULL;
                        xnpod_resume_thread(sleeper, XNPEND);
                }
-               if (testbits(synch->status, XNSYNCH_CLAIMED))
+               if (synch->status & XNSYNCH_CLAIMED)
                        xnsynch_clear_boost(synch, synch->owner);
        }
 
@@ -882,7 +882,7 @@ void xnsynch_forget_sleeper(struct xnthread *thread)
        thread->wchan = NULL;
        list_del(&thread->plink);
 
-       if (!testbits(synch->status, XNSYNCH_CLAIMED))
+       if ((synch->status & XNSYNCH_CLAIMED) == 0)
                return;
 
        /* Find the highest priority needed to enforce the PIP. */
diff --git a/kernel/cobalt/timer.c b/kernel/cobalt/timer.c
index 056c41d..95c9c07 100644
--- a/kernel/cobalt/timer.c
+++ b/kernel/cobalt/timer.c
@@ -72,7 +72,7 @@ void xntimer_next_local_shot(xnsched_t *sched)
         * will be done on exit anyway. Also exit if there is no
         * pending timer.
         */
-       if (testbits(sched->status, XNINTCK))
+       if (sched->status & XNINTCK)
                return;
 
        h = xntimerq_it_begin(&sched->timerqueue, &it);
@@ -135,7 +135,7 @@ static inline int xntimer_heading_p(struct xntimer *timer)
        if (h == &timer->aplink)
                return 1;
 
-       if (testbits(sched->lflags, XNHDEFER)) {
+       if (sched->lflags & XNHDEFER) {
                h = xntimerq_it_next(&sched->timerqueue, &it, h);
                if (h == &timer->aplink)
                        return 1;
@@ -159,7 +159,7 @@ static void xntimer_adjust(xntimer_t *timer, xnsticks_t delta)
 
        xntimerh_date(&timer->aplink) -= delta;
 
-       if (!testbits(timer->status, XNTIMER_PERIODIC))
+       if ((timer->status & XNTIMER_PERIODIC) == 0)
                goto enqueue;
 
        period = xntimer_interval(timer);
@@ -177,7 +177,7 @@ static void xntimer_adjust(xntimer_t *timer, xnsticks_t delta)
                mod = xnarch_mod64(diff, period);
                xntimerh_date(&timer->aplink) += diff - mod;
        } else if (delta < 0
-                  && testbits(timer->status, XNTIMER_FIRED)
+                  && (timer->status & XNTIMER_FIRED)
                   && (xnsticks_t) (diff + period) <= 0) {
                /*
                 * Timer is periodic and NOT waiting for its first
@@ -215,7 +215,7 @@ void xntimer_adjust_all(xnsticks_t delta)
                for (holder = xntimerq_it_begin(q, &it); holder;
                     holder = xntimerq_it_next(q, &it, holder)) {
                        timer = aplink2timer(holder);
-                       if (testbits(timer->status, XNTIMER_REALTIME))
+                       if (timer->status & XNTIMER_REALTIME)
                                list_add_tail(&timer->adjlink, &adjq);
                }
 
@@ -285,7 +285,7 @@ int xntimer_start(xntimer_t *timer,
                   "timer %p value %Lu interval %Lu mode %u",
                   timer, value, interval, mode);
 
-       if (!testbits(timer->status, XNTIMER_DEQUEUED))
+       if ((timer->status & XNTIMER_DEQUEUED) == 0)
                xntimer_dequeue(timer);
 
        now = xnclock_read_raw();
@@ -517,8 +517,8 @@ void xntimer_tick(void)
                xnstat_counter_inc(&timer->fired);
 
                if (likely(timer != &sched->htimer)) {
-                       if (likely(!testbits(nkclock.status, XNTBLCK)
-                                  || testbits(timer->status, XNTIMER_NOBLCK))) {
+                       if (likely((nkclock.status & XNTBLCK) == 0 ||
+                                  (timer->status & XNTIMER_NOBLCK))) {
                                timer->handler(timer);
                                now = xnclock_read_raw();
                                /*
@@ -530,7 +530,7 @@ void xntimer_tick(void)
                                if (!xntimer_reload_p(timer))
                                        continue;
                                __setbits(timer->status, XNTIMER_FIRED);
-                       } else if (likely(!testbits(timer->status, XNTIMER_PERIODIC))) {
+                       } else if (likely((timer->status & XNTIMER_PERIODIC) == 0)) {
                                /*
                                 * Make the blocked timer elapse again
                                 * at a reasonably close date in the
@@ -555,7 +555,7 @@ void xntimer_tick(void)
                         */
                        __setbits(sched->lflags, XNHTICK);
                        __clrbits(sched->lflags, XNHDEFER);
-                       if (!testbits(timer->status, XNTIMER_PERIODIC))
+                       if ((timer->status & XNTIMER_PERIODIC) == 0)
                                continue;
                }
 
@@ -714,8 +714,7 @@ int xntimer_migrate(xntimer_t *timer, xnsched_t *sched)
        if (sched == timer->sched)
                goto unlock_and_exit;
 
-       queued = !testbits(timer->status, XNTIMER_DEQUEUED);
-
+       queued = (timer->status & XNTIMER_DEQUEUED) == 0;
        if (queued) {
                if (timer->sched != xnpod_current_sched()) {
                        err = -EINVAL;
@@ -1062,7 +1061,7 @@ static int timer_vfile_show(struct xnvfile_regular_iterator *it, void *data)
        const char *tm_status, *wd_status = "";
 
        if (xnpod_active_p()) {
-               tm_status = testbits(nkclock.status, XNTBLCK) ? "locked" : "on";
+               tm_status = (nkclock.status & XNTBLCK) ? "locked" : "on";
 #ifdef CONFIG_XENO_OPT_WATCHDOG
                wd_status = "+watchdog";
 #endif /* CONFIG_XENO_OPT_WATCHDOG */
diff --git a/kernel/drivers/serial/16550A.c b/kernel/drivers/serial/16550A.c
index eadc1c0..f483691 100644
--- a/kernel/drivers/serial/16550A.c
+++ b/kernel/drivers/serial/16550A.c
@@ -173,7 +173,7 @@ static inline int rt_16550_rx_interrupt(struct rt_16550_context *ctx,
                        (RTSER_LSR_DATA | RTSER_LSR_OVERRUN_ERR |
                         RTSER_LSR_PARITY_ERR | RTSER_LSR_FRAMING_ERR |
                         RTSER_LSR_BREAK_IND));
-       } while (testbits(lsr, RTSER_LSR_DATA));
+       } while (lsr & RTSER_LSR_DATA);
 
        /* save new errors */
        ctx->status |= lsr;
@@ -240,7 +240,7 @@ static int rt_16550_interrupt(rtdm_irq_t * irq_context)
 
        while (1) {
                iir = rt_16550_reg_in(mode, base, IIR) & IIR_MASK;
-               if (testbits(iir, IIR_PIRQ))
+               if (iir & IIR_PIRQ)
                        break;
 
                if (iir == IIR_RX) {
@@ -274,7 +274,7 @@ static int rt_16550_interrupt(rtdm_irq_t * irq_context)
                ctx->ier_status &= ~IER_STAT;
        }
 
-       if (testbits(events, ctx->config.event_mask)) {
+       if (events & ctx->config.event_mask) {
                int old_events = ctx->ioc_events;
 
                ctx->last_timestamp = timestamp;
@@ -284,7 +284,7 @@ static int rt_16550_interrupt(rtdm_irq_t * irq_context)
                        rtdm_event_signal(&ctx->ioc_event);
        }
 
-       if (testbits(ctx->ier_status, IER_TX) && (ctx->out_npend == 0)) {
+       if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
                /* mask transmitter empty interrupt */
                ctx->ier_status &= ~IER_TX;
 
@@ -311,7 +311,7 @@ static int rt_16550_set_config(struct rt_16550_context *ctx,
        /* make line configuration atomic and IRQ-safe */
        rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
-       if (testbits(config->config_mask, RTSER_SET_BAUD)) {
+       if (config->config_mask & RTSER_SET_BAUD) {
                int dev_id = container_of(((void *)ctx),
                                          struct rtdm_dev_context,
                                          dev_private)->device->device_id;
@@ -325,17 +325,17 @@ static int rt_16550_set_config(struct rt_16550_context *ctx,
                rt_16550_reg_out(mode, base, DLM, baud_div >> 8);
        }
 
-       if (testbits(config->config_mask, RTSER_SET_PARITY))
+       if (config->config_mask & RTSER_SET_PARITY)
                ctx->config.parity = config->parity & PARITY_MASK;
-       if (testbits(config->config_mask, RTSER_SET_DATA_BITS))
+       if (config->config_mask & RTSER_SET_DATA_BITS)
                ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
-       if (testbits(config->config_mask, RTSER_SET_STOP_BITS))
+       if (config->config_mask & RTSER_SET_STOP_BITS)
                ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
 
-       if (testbits(config->config_mask, RTSER_SET_PARITY |
-                                         RTSER_SET_DATA_BITS |
-                                         RTSER_SET_STOP_BITS |
-                                         RTSER_SET_BAUD)) {
+       if (config->config_mask & (RTSER_SET_PARITY |
+                                  RTSER_SET_DATA_BITS |
+                                  RTSER_SET_STOP_BITS |
+                                  RTSER_SET_BAUD)) {
                rt_16550_reg_out(mode, base, LCR,
                                 (ctx->config.parity << 3) |
                                 (ctx->config.stop_bits << 2) |
@@ -344,7 +344,7 @@ static int rt_16550_set_config(struct rt_16550_context *ctx,
                ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
        }
 
-       if (testbits(config->config_mask, RTSER_SET_FIFO_DEPTH)) {
+       if (config->config_mask & RTSER_SET_FIFO_DEPTH) {
                ctx->config.fifo_depth = config->fifo_depth & FIFO_MASK;
                rt_16550_reg_out(mode, base, FCR,
                                 FCR_FIFO | FCR_RESET_RX | FCR_RESET_TX);
@@ -356,19 +356,18 @@ static int rt_16550_set_config(struct rt_16550_context *ctx,
 
        /* Timeout manipulation is not atomic. The user is supposed to take
           care not to use and change timeouts at the same time. */
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_RX))
+       if (config->config_mask & RTSER_SET_TIMEOUT_RX)
                ctx->config.rx_timeout = config->rx_timeout;
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_TX))
+       if (config->config_mask & RTSER_SET_TIMEOUT_TX)
                ctx->config.tx_timeout = config->tx_timeout;
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_EVENT))
+       if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
                ctx->config.event_timeout = config->event_timeout;
 
-       if (testbits(config->config_mask, RTSER_SET_TIMESTAMP_HISTORY)) {
+       if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
                /* change timestamp history atomically */
                rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
-               if (testbits
-                   (config->timestamp_history, RTSER_RX_TIMESTAMP_HISTORY)) {
+               if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
                        if (!ctx->in_history) {
                                ctx->in_history = *in_history_ptr;
                                *in_history_ptr = NULL;
@@ -383,23 +382,22 @@ static int rt_16550_set_config(struct rt_16550_context *ctx,
                rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
        }
 
-       if (testbits(config->config_mask, RTSER_SET_EVENT_MASK)) {
+       if (config->config_mask & RTSER_SET_EVENT_MASK) {
                /* change event mask atomically */
                rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
                ctx->config.event_mask = config->event_mask & EVENT_MASK;
                ctx->ioc_events = 0;
 
-               if (testbits(config->event_mask, RTSER_EVENT_RXPEND) &&
+               if ((config->event_mask & RTSER_EVENT_RXPEND) &&
                    (ctx->in_npend > 0))
                        ctx->ioc_events |= RTSER_EVENT_RXPEND;
 
-               if (testbits(config->event_mask, RTSER_EVENT_ERRPEND)
+               if ((config->event_mask & RTSER_EVENT_ERRPEND)
                    && ctx->status)
                        ctx->ioc_events |= RTSER_EVENT_ERRPEND;
 
-               if (testbits(config->event_mask,
-                            RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+               if (config->event_mask & (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
                        /* enable modem status interrupt */
                        ctx->ier_status |= IER_MODEM;
                else
@@ -410,7 +408,7 @@ static int rt_16550_set_config(struct rt_16550_context *ctx,
                rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
        }
 
-       if (testbits(config->config_mask, RTSER_SET_HANDSHAKE)) {
+       if (config->config_mask & RTSER_SET_HANDSHAKE) {
                /* change handshake atomically */
                rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
@@ -591,15 +589,14 @@ int rt_16550_ioctl(struct rtdm_dev_context *context,
                        config = &config_buf;
                }
 
-               if (testbits(config->config_mask, RTSER_SET_BAUD) &&
+               if ((config->config_mask & RTSER_SET_BAUD) &&
                    (config->baud_rate >
                     baud_base[context->device->device_id] ||
                     config->baud_rate <= 0))
                        /* invalid baudrate for this port */
                        return -EINVAL;
 
-               if (testbits(config->config_mask,
-                            RTSER_SET_TIMESTAMP_HISTORY)) {
+               if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
                        /*
                         * Reflect the call to non-RT as we will likely
                         * allocate or free the buffer.
@@ -607,8 +604,8 @@ int rt_16550_ioctl(struct rtdm_dev_context *context,
                        if (rtdm_in_rt_context())
                                return -ENOSYS;
 
-                       if (testbits(config->timestamp_history,
-                                    RTSER_RX_TIMESTAMP_HISTORY))
+                       if (config->timestamp_history &
+                           RTSER_RX_TIMESTAMP_HISTORY)
                                hist_buf = kmalloc(IN_BUFFER_SIZE *
                                                   sizeof(nanosecs_abs_t),
                                                   GFP_KERNEL);
@@ -695,8 +692,7 @@ int rt_16550_ioctl(struct rtdm_dev_context *context,
                while (!ctx->ioc_events) {
                        /* Only enable error interrupt
                           when the user waits for it. */
-                       if (testbits(ctx->config.event_mask,
-                                    RTSER_EVENT_ERRPEND)) {
+                       if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
                                ctx->ier_status |= IER_STAT;
                                rt_16550_reg_out(mode, base, IER,
                                                 ctx->ier_status);
@@ -828,7 +824,7 @@ ssize_t rt_16550_read(struct rtdm_dev_context * context,
 
        while (1) {
                /* switch on error interrupt - the user is ready to listen */
-               if (!testbits(ctx->ier_status, IER_STAT)) {
+               if ((ctx->ier_status & IER_STAT) == 0) {
                        ctx->ier_status |= IER_STAT;
                        rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
                                         ctx->base_addr, IER,
@@ -836,7 +832,7 @@ ssize_t rt_16550_read(struct rtdm_dev_context * context,
                }
 
                if (ctx->status) {
-                       if (testbits(ctx->status, RTSER_LSR_BREAK_IND))
+                       if (ctx->status & RTSER_LSR_BREAK_IND)
                                ret = -EPIPE;
                        else
                                ret = -EIO;
diff --git a/kernel/drivers/serial/mpc52xx_uart.c b/kernel/drivers/serial/mpc52xx_uart.c
index 33109dc..d63e4c6 100644
--- a/kernel/drivers/serial/mpc52xx_uart.c
+++ b/kernel/drivers/serial/mpc52xx_uart.c
@@ -283,7 +283,7 @@ static inline int rt_mpc52xx_uart_rx_interrupt(struct rt_mpc52xx_uart_ctx *ctx,
        int psc_status;
 
        psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
-       while (testbits(psc_status, MPC52xx_PSC_SR_RXRDY)) {
+       while (psc_status & MPC52xx_PSC_SR_RXRDY) {
                /* read input character */
                rt_mpc52xx_uart_put_char(ctx, timestamp, psc_read_char(ctx));
                rbytes++;
@@ -324,7 +324,7 @@ static inline int rt_mpc52xx_uart_tx_interrupt(struct rt_mpc52xx_uart_ctx *ctx)
 {
        while (psc_raw_tx_rdy(ctx) && (ctx->out_npend > 0)) {
                if (ctx->config.rs485 &&
-                   !testbits(ctx->mcr_status, RTSER_MCR_RTS)) {
+                   (ctx->mcr_status & RTSER_MCR_RTS) == 0) {
                        /* switch RTS */
                        ctx->mcr_status |= RTSER_MCR_RTS;
                        dev_dbg(ctx->port->dev, "Set RTS, mcr_status=%#x\n",
@@ -332,8 +332,8 @@ static inline int rt_mpc52xx_uart_tx_interrupt(struct rt_mpc52xx_uart_ctx *ctx)
                        psc_set_mcr(ctx, ctx->mcr_status);
                }
                if ((ctx->config.rs485 ||
-                    testbits(ctx->config.event_mask, RTSER_EVENT_TXEMPTY)) &&
-                    !testbits(ctx->imr_status, MPC52xx_PSC_IMR_TXEMP)) {
+                    (ctx->config.event_mask & RTSER_EVENT_TXEMPTY) &&
+                    (ctx->imr_status & MPC52xx_PSC_IMR_TXEMP) == 0) {
                        /* enable tx-empty interrupt */
                        ctx->imr_status |= MPC52xx_PSC_IMR_TXEMP;
                        dev_dbg(ctx->port->dev, "Enable TXEMP interrupt, "
@@ -379,7 +379,7 @@ static int rt_mpc52xx_uart_interrupt(rtdm_irq_t *irq_context)
 
                if (psc_tx_empty(ctx)) {
                        if (ctx->config.rs485 &&
-                           testbits(ctx->mcr_status, RTSER_MCR_RTS)) {
+                           (ctx->mcr_status & RTSER_MCR_RTS)) {
                                /* reset RTS */
                                ctx->mcr_status &= ~RTSER_MCR_RTS;
                                dev_dbg(ctx->port->dev, "Reset RTS, "
@@ -427,7 +427,7 @@ static int rt_mpc52xx_uart_interrupt(rtdm_irq_t *irq_context)
        if (ctx->status)
                events |= RTSER_EVENT_ERRPEND;
 
-       if (testbits(events, ctx->config.event_mask)) {
+       if (events & ctx->config.event_mask) {
                int old_events = ctx->ioc_events;
 
                ctx->last_timestamp = timestamp;
@@ -437,7 +437,7 @@ static int rt_mpc52xx_uart_interrupt(rtdm_irq_t *irq_context)
                        rtdm_event_signal(&ctx->ioc_event);
        }
 
-       if (testbits(ctx->imr_status, MPC52xx_PSC_IMR_TXRDY) &&
+       if ((ctx->imr_status & MPC52xx_PSC_IMR_TXRDY) &&
            (ctx->out_npend == 0)) {
                psc_stop_tx(ctx);
                rtdm_event_signal(&ctx->out_event);
@@ -459,20 +459,20 @@ static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
        /* make line configuration atomic and IRQ-safe */
        rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
-       if (testbits(config->config_mask, RTSER_SET_BAUD))
+       if (config->config_mask & RTSER_SET_BAUD)
                ctx->config.baud_rate = config->baud_rate;
-       if (testbits(config->config_mask, RTSER_SET_PARITY))
+       if (config->config_mask & RTSER_SET_PARITY)
                ctx->config.parity = config->parity & PARITY_MASK;
-       if (testbits(config->config_mask, RTSER_SET_DATA_BITS))
+       if (config->config_mask & RTSER_SET_DATA_BITS)
                ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
-       if (testbits(config->config_mask, RTSER_SET_STOP_BITS))
+       if (config->config_mask & RTSER_SET_STOP_BITS)
                ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
-       if (testbits(config->config_mask, RTSER_SET_HANDSHAKE))
+       if (config->config_mask & RTSER_SET_HANDSHAKE)
                ctx->config.handshake = config->handshake;
 
-       if (testbits(config->config_mask, RTSER_SET_PARITY |
-                    RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS |
-                    RTSER_SET_BAUD | RTSER_SET_HANDSHAKE)) {
+       if (config->config_mask & (RTSER_SET_PARITY |
+                                  RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS |
+                                  RTSER_SET_BAUD | RTSER_SET_HANDSHAKE)) {
                struct mpc52xx_psc *psc = ctx->port->psc;
                unsigned char mr1 = 0, mr2 = 0;
                unsigned int divisor;
@@ -517,7 +517,7 @@ static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
                if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
                        mr1 |= MPC52xx_PSC_MODE_RXRTS;
                        mr2 |= MPC52xx_PSC_MODE_TXCTS;
-               } else if (testbits(config->config_mask, RTSER_SET_HANDSHAKE)) {
+               } else if (config->config_mask & RTSER_SET_HANDSHAKE) {
                        ctx->mcr_status =
                                RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
                        psc_set_mcr(ctx, ctx->mcr_status);
@@ -557,7 +557,7 @@ static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
 
        }
 
-       if (testbits(config->config_mask, RTSER_SET_RS485)) {
+       if (config->config_mask & RTSER_SET_RS485) {
                ctx->config.rs485 = config->rs485;
                if (config->rs485) {
                        /* reset RTS */
@@ -572,19 +572,18 @@ static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
 
        /* Timeout manipulation is not atomic. The user is supposed to take
           care not to use and change timeouts at the same time. */
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_RX))
+       if (config->config_mask & RTSER_SET_TIMEOUT_RX)
                ctx->config.rx_timeout = config->rx_timeout;
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_TX))
+       if (config->config_mask & RTSER_SET_TIMEOUT_TX)
                ctx->config.tx_timeout = config->tx_timeout;
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_EVENT))
+       if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
                ctx->config.event_timeout = config->event_timeout;
 
-       if (testbits(config->config_mask, RTSER_SET_TIMESTAMP_HISTORY)) {
+       if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
                /* change timestamp history atomically */
                rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
-               if (testbits
-                   (config->timestamp_history, RTSER_RX_TIMESTAMP_HISTORY)) {
+               if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
                        if (!ctx->in_history) {
                                ctx->in_history = *in_history_ptr;
                                *in_history_ptr = NULL;
@@ -599,27 +598,27 @@ static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
                rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
        }
 
-       if (testbits(config->config_mask, RTSER_SET_EVENT_MASK)) {
+       if (config->config_mask & RTSER_SET_EVENT_MASK) {
                /* change event mask atomically */
                rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
                ctx->config.event_mask = config->event_mask & EVENT_MASK;
                ctx->ioc_events = 0;
 
-               if (testbits(config->event_mask, RTSER_EVENT_RXPEND) &&
+               if ((config->event_mask & RTSER_EVENT_RXPEND) &&
                    (ctx->in_npend > 0))
                        ctx->ioc_events |= RTSER_EVENT_RXPEND;
 
-               if (testbits(config->event_mask, RTSER_EVENT_ERRPEND) &&
+               if ((config->event_mask & RTSER_EVENT_ERRPEND) &&
                    ctx->status)
                        ctx->ioc_events |= RTSER_EVENT_ERRPEND;
 
-               if (testbits(config->event_mask, RTSER_EVENT_TXEMPTY) &&
+               if ((config->event_mask & RTSER_EVENT_TXEMPTY) &&
                    !ctx->out_npend && ctx->tx_empty)
                        ctx->ioc_events |= RTSER_EVENT_TXEMPTY;
 
-               if (testbits(config->event_mask,
-                            RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+               if (config->event_mask &
+                   (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
                        psc_enable_ms(ctx);
                else
                        psc_disable_ms(ctx);
@@ -773,13 +772,12 @@ static int rt_mpc52xx_uart_ioctl(struct rtdm_dev_context *context,
                        config = &config_buf;
                }
 
-               if (testbits(config->config_mask, RTSER_SET_BAUD) &&
+               if ((config->config_mask & RTSER_SET_BAUD) &&
                    (config->baud_rate <= 0))
                        /* invalid baudrate for this port */
                        return -EINVAL;
 
-               if (testbits(config->config_mask,
-                            RTSER_SET_TIMESTAMP_HISTORY)) {
+               if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
                        /*
                         * Reflect the call to non-RT as we will likely
                         * allocate or free the buffer.
@@ -787,8 +785,7 @@ static int rt_mpc52xx_uart_ioctl(struct rtdm_dev_context *context,
                        if (rtdm_in_rt_context())
                                return -ENOSYS;
 
-                       if (testbits(config->timestamp_history,
-                                    RTSER_RX_TIMESTAMP_HISTORY))
+                       if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY)
                                hist_buf = kmalloc(IN_BUFFER_SIZE *
                                                   sizeof(nanosecs_abs_t),
                                                   GFP_KERNEL);
@@ -996,7 +993,7 @@ static ssize_t rt_mpc52xx_uart_read(struct rtdm_dev_context *context,
 
        while (1) {
                if (ctx->status) {
-                       if (testbits(ctx->status, RTSER_LSR_BREAK_IND))
+                       if (ctx->status & RTSER_LSR_BREAK_IND)
                                ret = -EPIPE;
                        else
                                ret = -EIO;
diff --git a/kernel/drivers/serial/rt_imx_uart.c b/kernel/drivers/serial/rt_imx_uart.c
index 73b420e..3531f98 100644
--- a/kernel/drivers/serial/rt_imx_uart.c
+++ b/kernel/drivers/serial/rt_imx_uart.c
@@ -438,7 +438,7 @@ static int rt_imx_uart_int(rtdm_irq_t *irq_context)
 #endif
        }
 
-       if (testbits(events, ctx->config.event_mask)) {
+       if (events & ctx->config.event_mask) {
                int old_events = ctx->ioc_events;
 
                ctx->last_timestamp = timestamp;
@@ -448,7 +448,7 @@ static int rt_imx_uart_int(rtdm_irq_t *irq_context)
                        rtdm_event_signal(&ctx->ioc_event);
        }
 
-       if (testbits(ctx->ier_status, IER_TX) && (ctx->out_npend == 0)) {
+       if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
                rt_imx_uart_stop_tx(ctx);
                ctx->ier_status &= ~IER_TX;
                rtdm_event_signal(&ctx->out_event);
@@ -546,27 +546,26 @@ static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
 
        rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
 
-       if (testbits(config->config_mask, RTSER_SET_BAUD))
+       if (config->config_mask & RTSER_SET_BAUD)
                ctx->config.baud_rate = config->baud_rate;
-       if (testbits(config->config_mask, RTSER_SET_DATA_BITS))
+       if (config->config_mask & RTSER_SET_DATA_BITS)
                ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
-       if (testbits(config->config_mask, RTSER_SET_PARITY))
+       if (config->config_mask & RTSER_SET_PARITY)
                ctx->config.parity = config->parity & PARITY_MASK;
-       if (testbits(config->config_mask, RTSER_SET_STOP_BITS))
+       if (config->config_mask & RTSER_SET_STOP_BITS)
                ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
 
        /* Timeout manipulation is not atomic. The user is supposed to take
           care not to use and change timeouts at the same time. */
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_RX))
+       if (config->config_mask & RTSER_SET_TIMEOUT_RX)
                ctx->config.rx_timeout = config->rx_timeout;
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_TX))
+       if (config->config_mask & RTSER_SET_TIMEOUT_TX)
                ctx->config.tx_timeout = config->tx_timeout;
-       if (testbits(config->config_mask, RTSER_SET_TIMEOUT_EVENT))
+       if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
                ctx->config.event_timeout = config->event_timeout;
 
-       if (testbits(config->config_mask, RTSER_SET_TIMESTAMP_HISTORY)) {
-               if (testbits
-                   (config->timestamp_history, RTSER_RX_TIMESTAMP_HISTORY)) {
+       if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+               if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
                        if (!ctx->in_history) {
                                ctx->in_history = *in_history_ptr;
                                *in_history_ptr = NULL;
@@ -579,20 +578,20 @@ static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
                }
        }
 
-       if (testbits(config->config_mask, RTSER_SET_EVENT_MASK)) {
+       if (config->config_mask & RTSER_SET_EVENT_MASK) {
                ctx->config.event_mask = config->event_mask & EVENT_MASK;
                ctx->ioc_events = 0;
 
-               if (testbits(config->event_mask, RTSER_EVENT_RXPEND) &&
+               if ((config->event_mask & RTSER_EVENT_RXPEND) &&
                    (ctx->in_npend > 0))
                        ctx->ioc_events |= RTSER_EVENT_RXPEND;
 
-               if (testbits(config->event_mask, RTSER_EVENT_ERRPEND)
+               if ((config->event_mask & RTSER_EVENT_ERRPEND)
                    && ctx->status)
                        ctx->ioc_events |= RTSER_EVENT_ERRPEND;
        }
 
-       if (testbits(config->config_mask, RTSER_SET_HANDSHAKE)) {
+       if (config->config_mask & RTSER_SET_HANDSHAKE) {
                ctx->config.handshake = config->handshake;
 
                switch (ctx->config.handshake) {
@@ -607,12 +606,12 @@ static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
        }
 
        /* configure hardware with new parameters */
-       if (testbits(config->config_mask, (RTSER_SET_BAUD |
-                                          RTSER_SET_PARITY |
-                                          RTSER_SET_DATA_BITS |
-                                          RTSER_SET_STOP_BITS |
-                                          RTSER_SET_EVENT_MASK |
-                                          RTSER_SET_HANDSHAKE))) {
+       if (config->config_mask & (RTSER_SET_BAUD |
+                                  RTSER_SET_PARITY |
+                                  RTSER_SET_DATA_BITS |
+                                  RTSER_SET_STOP_BITS |
+                                  RTSER_SET_EVENT_MASK |
+                                  RTSER_SET_HANDSHAKE)) {
                struct rt_imx_uart_port *port = ctx->port;
                unsigned int ucr2, old_ucr1, old_txrxen;
                unsigned int baud = ctx->config.baud_rate;
@@ -690,8 +689,8 @@ static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
                /* set the parity, stop bits and data size */
                writel(ucr2 | old_txrxen, port->membase + UCR2);
 
-               if (testbits(config->event_mask,
-                            RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+               if (config->event_mask &
+                   (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
                        rt_imx_uart_enable_ms(ctx);
 
                ctx->status = 0;
@@ -901,14 +900,13 @@ static int rt_imx_uart_ioctl(struct rtdm_dev_context *context,
                        config = &config_buf;
                }
 
-               if (testbits(config->config_mask, RTSER_SET_BAUD) &&
+               if ((config->config_mask & RTSER_SET_BAUD) &&
                    (config->baud_rate > clk_get_rate(ctx->port->clk) / 16 ||
                     config->baud_rate <= 0))
                        /* invalid baudrate for this port */
                        return -EINVAL;
 
-               if (testbits(config->config_mask,
-                            RTSER_SET_TIMESTAMP_HISTORY)) {
+               if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
                        /*
                         * Reflect the call to non-RT as we will likely
                         * allocate or free the buffer.
@@ -916,8 +914,7 @@ static int rt_imx_uart_ioctl(struct rtdm_dev_context *context,
                        if (rtdm_in_rt_context())
                                return -ENOSYS;
 
-                       if (testbits(config->timestamp_history,
-                                    RTSER_RX_TIMESTAMP_HISTORY))
+                       if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY)
                                hist_buf = kmalloc(IN_BUFFER_SIZE *
                                                   sizeof(nanosecs_abs_t),
                                                   GFP_KERNEL);
@@ -1002,8 +999,7 @@ static int rt_imx_uart_ioctl(struct rtdm_dev_context *context,
                while (!ctx->ioc_events) {
                        /* Only enable error interrupt
                           when the user waits for it. */
-                       if (testbits(ctx->config.event_mask,
-                                    RTSER_EVENT_ERRPEND)) {
+                       if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
                                ctx->ier_status |= IER_STAT;
 #ifdef FIXME
                                rt_imx_uart_reg_out(mode, base, IER,
@@ -1131,7 +1127,7 @@ ssize_t rt_imx_uart_read(struct rtdm_dev_context *context,
 
        while (1) {
                if (ctx->status) {
-                       if (testbits(ctx->status, RTSER_LSR_BREAK_IND))
+                       if (ctx->status & RTSER_LSR_BREAK_IND)
                                ret = -EPIPE;
                        else
                                ret = -EIO;


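Note for readers without the tree at hand: the hunks above apply one mechanical substitution, replacing the dropped macro with an explicit bitwise test. The snippet below is a sketch only, assuming testbits() amounted to a plain bitwise AND (its exact definition is not reproduced here); the flag values and the demo_config structure are hypothetical stand-ins for the RTSER_* masks and driver context touched by the patch.

/* Sketch only -- hypothetical stand-ins, not the driver's real definitions. */
#include <stdio.h>

#define RTSER_SET_BAUD  0x0001  /* stand-in flag value */

/* Assumed shape of the macro this patch removes. */
#define testbits(flags, mask)   ((flags) & (mask))

struct demo_config {
        int config_mask;
        int baud_rate;
};

int main(void)
{
        struct demo_config config = { RTSER_SET_BAUD, 115200 };

        /* Before: the macro hides an ordinary bitwise test. */
        if (testbits(config.config_mask, RTSER_SET_BAUD))
                printf("baud update requested (macro form)\n");

        /* After: the same test, open coded. */
        if (config.config_mask & RTSER_SET_BAUD)
                printf("baud update requested (open coded)\n");

        /*
         * In compound conditions the open-coded test keeps its own
         * parentheses, standing in for the ones the macro call used to
         * provide (compare the RTSER_SET_BAUD checks in the ioctl hunks).
         */
        if ((config.config_mask & RTSER_SET_BAUD) &&
            config.baud_rate <= 0)
                return -1;

        return 0;
}

The open-coded form reads identically at every call site and removes one level of indirection when grepping for flag tests, at the cost of the extra parentheses shown above.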