On 08/24/2017 09:44 AM, Darrell Ball wrote:
>
> On 8/23/17, 6:34 AM, "Kevin Traynor" <[email protected]> wrote:
>
> Count the cycles used for processing an rxq during the
> pmd rxq interval. As this is an in-flight counter and
> pmds run independently, also store the total cycles used
> during the last full interval.
>
> Signed-off-by: Kevin Traynor <[email protected]>
> ---
> lib/dpif-netdev.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++----
> 1 file changed, 73 insertions(+), 5 deletions(-)
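
For reference, a rough sketch of the per-rxq storage these counters live in.
The fields themselves are added elsewhere in the series, so the declarations
below are reconstructed from the accessors in this patch and should be read
as assumptions, not the literal struct:

    /* Sketch only: assumed per-rxq cycle storage. */
    enum rxq_cycles_counter_type {
        RXQ_CYCLES_PROC_CURR,   /* Cycles used in the current, in-flight
                                 * measuring interval. */
        RXQ_CYCLES_PROC_HIST,   /* Total cycles of the last full interval. */
        RXQ_N_CYCLES
    };

    struct dp_netdev_rxq {
        /* ... other members ... */
        atomic_ullong cycles[RXQ_N_CYCLES];
        /* Ring of the last PMD_RXQ_INTERVAL_MAX stored interval totals. */
        atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
        unsigned intrvl_idx;    /* Next write slot in the ring. */
    };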
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 51d4050..5670c55 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -182,4 +182,8 @@ struct emc_cache {
> #define DPCLS_OPTIMIZATION_INTERVAL 1000
>
> +/* Time in ms of the interval in which rxq processing cycles used in
> + * rxq to pmd assignments is measured and stored. */
> +#define PMD_RXQ_INTERVAL_LEN 10000
> +
> /* Number of intervals for which cycles are stored
> * and used during rxq to pmd assignment. */
> @@ -568,4 +572,6 @@ struct dp_netdev_pmd_thread {
> /* Periodically sort subtable vectors according to hit frequencies */
> long long int next_optimization;
> + /* Periodically store the processing cycles used for each rxq. */
>
> The above comment seems a bit misleading; should it say something like
> ‘the end of the next time interval for which processing cycles are
> stored for each rxq’?
>
You're right, changed it.
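i.e. the comment would become something like:

    /* End of the next time interval for which processing cycles
     * are stored for each rxq. */
    long long int rxq_interval;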
> + long long int rxq_interval;
>
>
> /* Statistics. */
> @@ -694,5 +700,16 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
> OVS_REQUIRES(pmd->port_mutex);
> static inline void
> -dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
> +dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
> + struct polled_queue *poll_list, int poll_cnt);
> +static void
> +dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
> + enum rxq_cycles_counter_type type,
> + unsigned long long cycles);
> +static uint64_t
> +dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
> + enum rxq_cycles_counter_type type);
> +static void
> +dp_netdev_rxq_set_interval(struct dp_netdev_rxq *rx,
> + unsigned long long cycles);
> static void
> dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
> @@ -3124,4 +3141,29 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
> }
>
> +static void
> +dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
> + enum rxq_cycles_counter_type type,
> + unsigned long long cycles)
> +{
> + atomic_store_relaxed(&rx->cycles[type], cycles);
> +}
> +
> +static uint64_t
> +dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
> + enum rxq_cycles_counter_type type)
> +{
> + unsigned long long tmp;
> + atomic_read_relaxed(&rx->cycles[type], &tmp);
> + return tmp;
>
> Could we use a name like ‘processing_cycles’ or something other than ‘tmp’?
>
Sure, changed.
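For v2 the getter would then look something like this sketch:

    static uint64_t
    dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
                             enum rxq_cycles_counter_type type)
    {
        unsigned long long processing_cycles;

        atomic_read_relaxed(&rx->cycles[type], &processing_cycles);
        return processing_cycles;
    }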
>
> +}
> +
> +static void
> +dp_netdev_rxq_set_interval(struct dp_netdev_rxq *rx,
> + unsigned long long cycles)
>
>
> _set_interval seems a bit vague, at least to me. _set_intrv_cycles?
> Same for _get_interval in patch 4?
Completely agree, adding _cycles makes it clearer. Changed to
_set_intrvl_cycles.
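So the setter in v2 becomes, roughly:

    static void
    dp_netdev_rxq_set_intrvl_cycles(struct dp_netdev_rxq *rx,
                                    unsigned long long cycles)
    {
        /* intrvl_idx++ % PMD_RXQ_INTERVAL_MAX makes cycles_intrvl a ring
         * holding the totals of the last PMD_RXQ_INTERVAL_MAX intervals. */
        atomic_store_relaxed(&rx->cycles_intrvl[rx->intrvl_idx++
                                                % PMD_RXQ_INTERVAL_MAX],
                             cycles);
    }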
>
>
> +{
> + atomic_store_relaxed(&rx->cycles_intrvl[rx->intrvl_idx++
> + % PMD_RXQ_INTERVAL_MAX], cycles);
> +}
> +
> static int
> dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
> @@ -3168,5 +3210,5 @@ port_reconfigure(struct dp_netdev_port *port)
> {
> struct netdev *netdev = port->netdev;
> - int i, err;
> + int i, err, last_nrxq, j;
>
> move ‘j’ down to the for loop and use unsigned
changed
> move last_nrxq down to where it is first used
changed
>
> port->need_reconfigure = false;
> @@ -3177,4 +3219,5 @@ port_reconfigure(struct dp_netdev_port *port)
> port->rxqs[i].rx = NULL;
> }
> + last_nrxq = port->n_rxq;
>
> uint32_t last_nrxq = port->n_rxq;
>
Made this change, but used unsigned to be consistent with the type of n_rxq.
> port->n_rxq = 0;
>
> @@ -3197,4 +3240,12 @@ port_reconfigure(struct dp_netdev_port *port)
> for (i = 0; i < netdev_n_rxq(netdev); i++) {
> port->rxqs[i].port = port;
> + if (i >= last_nrxq) {
> + /* Only reset cycle stats for new queues */
> + dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
> + dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_HIST, 0);
> + for (j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
>
> for (uint8_t j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
>
Changed to unsigned as per the previous comment.
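With those changes (and the rename above) the v2 block will read roughly:

    unsigned last_nrxq = port->n_rxq;
    /* ... */
    for (i = 0; i < netdev_n_rxq(netdev); i++) {
        port->rxqs[i].port = port;
        if (i >= last_nrxq) {
            /* Only reset cycle stats for new queues. */
            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_HIST, 0);
            for (unsigned j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
                dp_netdev_rxq_set_intrvl_cycles(&port->rxqs[i], 0);
            }
        }
        /* netdev_rxq_open() etc. continue as before. */
    }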
>
> + dp_netdev_rxq_set_interval(&port->rxqs[i], 0);
> + }
> + }
> err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
> if (err) {
> @@ -3877,5 +3928,5 @@ reload:
> dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
> poll_list[i].port_no);
> - cycles_count_intermediate(pmd, NULL,
> + cycles_count_intermediate(pmd, poll_list[i].rxq,
> process_packets ? PMD_CYCLES_PROCESSING
> : PMD_CYCLES_IDLE);
> @@ -3888,5 +3939,5 @@ reload:
>
> coverage_try_clear();
> - dp_netdev_pmd_try_optimize(pmd);
> + dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
> if (!ovsrcu_try_quiesce()) {
> emc_cache_slow_sweep(&pmd->flow_cache);
> @@ -4332,4 +4383,5 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
> cmap_init(&pmd->classifiers);
> pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
> + pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL_LEN;
> hmap_init(&pmd->poll_list);
> hmap_init(&pmd->tx_ports);
> @@ -5769,8 +5821,24 @@ dpcls_sort_subtable_vector(struct dpcls *cls)
>
> static inline void
> -dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
> +dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
> + struct polled_queue *poll_list, int poll_cnt)
> {
> struct dpcls *cls;
> long long int now = time_msec();
> + int i;
>
> move ‘i’ to for loop and use unsigned
>
changed
>
> + uint64_t rxq_cyc_curr;
>
> move inside for loop
>
changed
> +
> + if (now > pmd->rxq_interval) {
> + /* Get the cycles that were used to process each queue and store. */
> + for (i = 0; i < poll_cnt; i++) {
>
> for (uint32_t i = 0; i < poll_cnt; i++) {
>
changed to unsigned
> + rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
> + RXQ_CYCLES_PROC_CURR);
> + dp_netdev_rxq_set_interval(poll_list[i].rxq, rxq_cyc_curr);
> + dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
> + 0);
> + }
> + /* Start new measuring interval */
> + pmd->rxq_interval = now + PMD_RXQ_INTERVAL_LEN;
> + }
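
Taken together (unsigned loop index, rxq_cyc_curr local to the loop, and
the _set_intrvl_cycles rename), the v2 interval-store block should look
roughly like this sketch:

    if (now > pmd->rxq_interval) {
        /* Get the cycles that were used to process each queue and store. */
        for (unsigned i = 0; i < poll_cnt; i++) {
            uint64_t rxq_cyc_curr =
                dp_netdev_rxq_get_cycles(poll_list[i].rxq,
                                         RXQ_CYCLES_PROC_CURR);
            dp_netdev_rxq_set_intrvl_cycles(poll_list[i].rxq, rxq_cyc_curr);
            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
                                     0);
        }
        /* Start a new measuring interval. */
        pmd->rxq_interval = now + PMD_RXQ_INTERVAL_LEN;
    }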
>
> if (now > pmd->next_optimization) {
> --
> 1.8.3.1
>
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev