This commit fixes the cq index checks when unlinking
ports/queues while the scheduler core is running.
Previously, the == comparison could be skipped in particular
corner cases, e.g. when unlinking reduces the number of mapped
cqs below the current index. With the check changed to >= this
is resolved, as the cq idx is then reliably reset to zero.

Bugzilla ID: 60
Fixes: 617995dfc5b2 ("event/sw: add scheduling logic")

Suggested-by: Matias Elo <matias....@nokia.com>
Signed-off-by: Harry van Haaren <harry.van.haa...@intel.com>

---

Cc: sta...@dpdk.org

@Matias,

When testing this patch with your provided test case as per the attachment
to bug #60 in Bugzilla, I don't see any events arriving at port 0.
Hence, I believe this to be the correct behaviour; if you can confirm
that'd be awesome!

Regards, -Harry
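
For anyone following along, here is a minimal standalone sketch (not the
driver code itself; the names next_tx and num_mapped are illustrative
only) of why the == wrap check can be skipped once a port is unlinked
and the mapped-cq count shrinks, while the >= check recovers:

	#include <stdio.h>
	#include <stdint.h>

	/* old logic: post-increment, wrap only on exact equality */
	static uint32_t pick_old(uint32_t *next_tx, uint32_t num_mapped)
	{
		uint32_t idx = (*next_tx)++;
		if (*next_tx == num_mapped)
			*next_tx = 0;
		return idx;
	}

	/* new logic: wrap first with >=, so a shrunken map is handled */
	static uint32_t pick_new(uint32_t *next_tx, uint32_t num_mapped)
	{
		if (*next_tx >= num_mapped)
			*next_tx = 0;
		return (*next_tx)++;
	}

	int main(void)
	{
		/* index left over from when 4 cqs were mapped ... */
		uint32_t next_tx = 3;
		/* ... but two ports have since been unlinked */
		uint32_t num_mapped = 2;

		/* == never fires: returns 3, past the mapped range */
		printf("old: %u\n", pick_old(&next_tx, num_mapped));

		next_tx = 3;
		/* >= wraps first: returns 0, back inside the range */
		printf("new: %u\n", pick_new(&next_tx, num_mapped));
		return 0;
	}
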

---
 drivers/event/sw/sw_evdev_scheduler.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index e3a41e02f..fb5d44630 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -51,9 +51,11 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
                int cq = fid->cq;
 
                if (cq < 0) {
-                       uint32_t cq_idx = qid->cq_next_tx++;
-                       if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+                       uint32_t cq_idx;
+                       if (qid->cq_next_tx >= qid->cq_num_mapped_cqs)
                                qid->cq_next_tx = 0;
+                       cq_idx = qid->cq_next_tx++;
+
                        cq = qid->cq_map[cq_idx];
 
                        /* find least used */
@@ -140,9 +142,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
                do {
                        if (++cq_check_count > qid->cq_num_mapped_cqs)
                                goto exit;
-                       cq = qid->cq_map[cq_idx];
-                       if (++cq_idx == qid->cq_num_mapped_cqs)
+                       if (cq_idx >= qid->cq_num_mapped_cqs)
                                cq_idx = 0;
+                       cq = qid->cq_map[cq_idx++];
+
                } while (rte_event_ring_free_count(
                                sw->ports[cq].cq_worker_ring) == 0 ||
                                sw->ports[cq].inflights == SW_PORT_HIST_LIST);
@@ -220,7 +223,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
                int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
 
                /* zero mapped CQs indicates directed */
-               if (iq_num >= SW_IQS_MAX)
+               if (iq_num >= SW_IQS_MAX || qid->cq_num_mapped_cqs == 0)
                        continue;
 
                uint32_t pkts_done = 0;
-- 
2.17.1
