From: Petri Savolainen <petri.savolai...@linaro.org>

Remove unnecessary checks from the critical sections of the scheduled
queue enqueue and dequeue operations. Parallelism improves when the
number of instructions executed and (potential) cache misses incurred
while holding the lock is reduced.

Signed-off-by: Petri Savolainen <petri.savolai...@linaro.org>
---
/** Email created from pull request 699 (psavol:master-sched-optim-clean-ups)
 ** https://github.com/Linaro/odp/pull/699
 ** Patch: https://github.com/Linaro/odp/pull/699.patch
 ** Base sha: 33fbc04b6373960ec3f84de4e7e7b34c49d71508
 ** Merge commit sha: 32d7a11f22e6f2e1e378b653993c5377d4116d8f
 **/
 platform/linux-generic/odp_queue_basic.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/platform/linux-generic/odp_queue_basic.c 
b/platform/linux-generic/odp_queue_basic.c
index 7e8b7e34d..61cf8a56c 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -681,12 +681,6 @@ static inline int _sched_queue_enq_multi(odp_queue_t 
handle,
 
        LOCK(queue);
 
-       if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
-               UNLOCK(queue);
-               ODP_ERR("Bad queue status\n");
-               return -1;
-       }
-
        num_enq = ring_st_enq_multi(ring_st, queue->s.ring_data,
                                    queue->s.ring_mask, buf_idx, num);
 
@@ -712,7 +706,7 @@ static inline int _sched_queue_enq_multi(odp_queue_t handle,
 int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
                    int update_status)
 {
-       int num_deq;
+       int num_deq, status;
        ring_st_t *ring_st;
        queue_entry_t *queue = qentry_from_index(queue_index);
        int status_sync = sched_fn->status_sync;
@@ -722,7 +716,9 @@ int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], 
int max_num,
 
        LOCK(queue);
 
-       if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
+       status = queue->s.status;
+
+       if (odp_unlikely(status < QUEUE_STATUS_READY)) {
                /* Bad queue, or queue has been destroyed.
                 * Scheduler finalizes queue destroy after this. */
                UNLOCK(queue);
@@ -734,10 +730,10 @@ int sched_queue_deq(uint32_t queue_index, odp_event_t 
ev[], int max_num,
 
        if (num_deq == 0) {
                /* Already empty queue */
-               if (update_status && queue->s.status == QUEUE_STATUS_SCHED) {
+               if (update_status && status == QUEUE_STATUS_SCHED) {
                        queue->s.status = QUEUE_STATUS_NOTSCHED;
 
-                       if (status_sync)
+                       if (odp_unlikely(status_sync))
                                sched_fn->unsched_queue(queue->s.index);
                }
 
@@ -746,7 +742,7 @@ int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], 
int max_num,
                return 0;
        }
 
-       if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
+       if (odp_unlikely(status_sync))
                sched_fn->save_context(queue->s.index);
 
        UNLOCK(queue);

Reply via email to