Previously, logging about rxq scheduling was done in the same code branch as the selection of the PMD thread core, after checking that a numa was selected.
Splitting the logging out from the PMD thread core selection reduces the code complexity and makes it easier to extend for future additions. Also, make minor updates to a couple of variables to improve readability and fix a log indent while working on this code block.

There is no user visible change in behaviour or logs.

Signed-off-by: Kevin Traynor <[email protected]>
---
 lib/dpif-netdev.c | 59 +++++++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 27 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 40a62fd9f..dc414ae8f 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -5991,5 +5991,6 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
     struct sched_pmd *sched_pmd = NULL;
     struct sched_numa *numa;
-    int numa_id;
+    int port_numa_id;
+    int pmd_numa_id;
     uint64_t proc_cycles;
     char rxq_cyc_log[MAX_RXQ_CYC_STRLEN];
@@ -6003,7 +6004,8 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
         /* Store the cycles for this rxq as we will log these later. */
         proc_cycles = dp_netdev_rxq_get_cycles(rxq, RXQ_CYCLES_PROC_HIST);
-        /* Select the numa that should be used for this rxq. */
-        numa_id = netdev_get_numa_id(rxq->port->netdev);
-        numa = sched_numa_list_lookup(numa_list, numa_id);
+        port_numa_id = netdev_get_numa_id(rxq->port->netdev);
+
+        /* Select numa. */
+        numa = sched_numa_list_lookup(numa_list, port_numa_id);
 
         /* Check if numa has no PMDs or no non-isolated PMDs. */
@@ -6023,33 +6025,36 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
 
         if (numa) {
-            if (numa->numa_id != numa_id) {
+            /* Select the PMD that should be used for this rxq. */
+            sched_pmd = sched_pmd_next(numa, algo,
+                                       proc_cycles ? true : false);
+        }
+
+        /* Check that a pmd has been selected. */
+        if (sched_pmd) {
+            pmd_numa_id = sched_pmd->numa->numa_id;
+            /* Check if selected pmd numa matches port numa. */
+            if (pmd_numa_id != port_numa_id) {
                 VLOG(level, "There's no available (non-isolated) pmd thread "
                             "on numa node %d. Port \'%s\' rx queue %d will "
                             "be assigned to a pmd on numa node %d. "
                             "This may lead to reduced performance.",
-                            numa_id, netdev_rxq_get_name(rxq->rx),
-                            netdev_rxq_get_queue_id(rxq->rx), numa->numa_id);
+                            port_numa_id, netdev_rxq_get_name(rxq->rx),
+                            netdev_rxq_get_queue_id(rxq->rx), pmd_numa_id);
             }
-
-            /* Select the PMD that should be used for this rxq. */
-            sched_pmd = sched_pmd_next(numa, algo, proc_cycles ? true : false);
-            if (sched_pmd) {
-                VLOG(level, "Core %2u on numa node %d assigned port \'%s\' "
-                            "rx queue %d%s.",
-                            sched_pmd->pmd->core_id, sched_pmd->pmd->numa_id,
-                            netdev_rxq_get_name(rxq->rx),
-                            netdev_rxq_get_queue_id(rxq->rx),
-                            get_rxq_cyc_log(rxq_cyc_log, algo, proc_cycles));
-                sched_pmd_add_rxq(sched_pmd, rxq, proc_cycles);
-            }
-        }
-        if (!sched_pmd) {
+            VLOG(level, "Core %2u on numa node %d assigned port \'%s\' "
+                        "rx queue %d%s.",
+                        sched_pmd->pmd->core_id, sched_pmd->pmd->numa_id,
+                        netdev_rxq_get_name(rxq->rx),
+                        netdev_rxq_get_queue_id(rxq->rx),
+                        get_rxq_cyc_log(rxq_cyc_log, algo, proc_cycles));
+            sched_pmd_add_rxq(sched_pmd, rxq, proc_cycles);
+        } else {
             VLOG(level == VLL_DBG ? level : VLL_WARN,
-                "No non-isolated pmd on any numa available for "
-                "port \'%s\' rx queue %d%s. "
-                "This rx queue will not be polled.",
-                netdev_rxq_get_name(rxq->rx),
-                netdev_rxq_get_queue_id(rxq->rx),
-                get_rxq_cyc_log(rxq_cyc_log, algo, proc_cycles));
+                 "No non-isolated pmd on any numa available for "
+                 "port \'%s\' rx queue %d%s. "
+                 "This rx queue will not be polled.",
+                 netdev_rxq_get_name(rxq->rx),
+                 netdev_rxq_get_queue_id(rxq->rx),
+                 get_rxq_cyc_log(rxq_cyc_log, algo, proc_cycles));
         }
     }
--
2.34.1
