When swapping queues from a pmd thread to another (q0 polled by pmd0/q1 polled by pmd1 -> q1 polled by pmd0/q0 polled by pmd1), the current "Step 5" puts both pmds to sleep waiting for the control thread to wake them up later.
Prefer to make them spin in such a case to avoid sleeping a nondeterministic amount of time. Signed-off-by: David Marchand <david.march...@redhat.com> Acked-by: Eelco Chaudron <echau...@redhat.com> --- lib/dpif-netdev.c | 47 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 23cf6a6..243c1ce 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -683,6 +683,7 @@ struct dp_netdev_pmd_thread { struct seq *reload_seq; uint64_t last_reload_seq; atomic_bool reload; /* Do we need to reload ports? */ + atomic_bool wait_for_reload; /* Can we busy wait for the next reload? */ atomic_bool exit; /* For terminating the pmd thread. */ pthread_t thread; unsigned core_id; /* CPU core id of this pmd thread. */ @@ -4896,6 +4897,33 @@ reconfigure_datapath(struct dp_netdev *dp) HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { if (poll->rxq->pmd != pmd) { dp_netdev_del_rxq_from_pmd(pmd, poll); + + /* This pmd might sleep after this step reload if it has no + * rxq remaining. Can we tell it to busy wait for new rxq at + * Step 6 ? 
*/ + if (hmap_count(&pmd->poll_list) == 0) { + HMAP_FOR_EACH (port, node, &dp->ports) { + int qid; + + if (!netdev_is_pmd(port->netdev)) { + continue; + } + + for (qid = 0; qid < port->n_rxq; qid++) { + struct dp_netdev_rxq *q = &port->rxqs[qid]; + + if (q->pmd == pmd) { + atomic_store_relaxed(&q->pmd->wait_for_reload, + true); + break; + } + } + + if (qid != port->n_rxq) { + break; + } + } + } } } ovs_mutex_unlock(&pmd->port_mutex); @@ -5413,7 +5441,9 @@ pmd_thread_main(void *f_) struct pmd_perf_stats *s = &pmd->perf_stats; unsigned int lc = 0; struct polled_queue *poll_list; + bool wait_for_reload = false; bool exiting; + bool reload; int poll_cnt; int i; int process_packets = 0; @@ -5441,9 +5471,16 @@ reload: } if (!poll_cnt) { - while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) { - seq_wait(pmd->reload_seq, pmd->last_reload_seq); - poll_block(); + /* Don't sleep, control thread will ask for a reload shortly. */ + if (wait_for_reload) { + do { + atomic_read_relaxed(&pmd->reload, &reload); + } while (!reload); + } else { + while (seq_read(pmd->reload_seq) == pmd->last_reload_seq) { + seq_wait(pmd->reload_seq, pmd->last_reload_seq); + poll_block(); + } } lc = UINT_MAX; } @@ -5482,8 +5519,6 @@ reload: } if (lc++ > 1024) { - bool reload; - lc = 0; coverage_try_clear(); @@ -5503,6 +5538,7 @@ reload: ovs_mutex_unlock(&pmd->perf_stats.stats_mutex); poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); + atomic_read_relaxed(&pmd->wait_for_reload, &wait_for_reload); atomic_read_relaxed(&pmd->exit, &exiting); /* Signal here to make sure the pmd finishes * reloading the updated configuration. 
*/ @@ -5839,6 +5875,7 @@ dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) { uint32_t old; + atomic_store_relaxed(&pmd->wait_for_reload, false); atomic_store_relaxed(&pmd->reload, false); pmd->last_reload_seq = seq_read(pmd->reload_seq); atomic_sub_explicit(&pmd->dp->reloading_pmds, 1, &old, -- 1.8.3.1 _______________________________________________ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev