On 10/11/2019 23:20, Ilya Maximets wrote:
> On 29.10.2019 15:50, Sriram Vatala wrote:
>> @@ -2388,12 +2412,16 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>>           }
>>       } while (cnt && (retries++ < max_retries));
>>   
>> +    tx_failure = cnt;
>>       rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
>>   
>>       rte_spinlock_lock(&dev->stats_lock);
>>       netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
>>                                            cnt + dropped);
>> -    dev->tx_retries += MIN(retries, max_retries);
>> +    sw_stats->tx_retries += MIN(retries, max_retries);
>> +    sw_stats->tx_failure_drops += tx_failure;
>> +    sw_stats->tx_mtu_exceeded_drops += mtu_drops;
>> +    sw_stats->tx_qos_drops += qos_drops;
> 
> Kevin pointed to this part of code in hope that we can move this to
> a separate function and reuse in his review for v9.  This code catches
> my eyes too.  I don't think that we can reuse this part, at least this
> will be not very efficient in current situation (clearing of the unused
> fields in a stats structure will be probably needed before calling such
> a common function, but ETH tx uses only half of the struct).
> 
> But there is another thing here.  We already have special functions
> for vhost tx/rx counters.  And it looks strange when we're not using
> these functions to update tx/rx failure counters.
> 
> Suggesting following incremental.
> Kevin, Sriram, please, share your thoughts.
> 

The incremental patch looks good, thanks. One additional thing is that
OVS_REQUIRES(dev->stats_lock) annotations can be used for the vhost
rx/tx update counter functions now (even if it seems unlikely someone
would miss doing that).

@Sriram, see below or you can check similar OVS_REQUIRES usage elsewhere
in the file.

> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 3cb7023a8..02120a379 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -2169,16 +2169,18 @@ netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
>   }
>   
>   static inline void
> -netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
> +netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev,
>                                        struct dp_packet **packets, int count,
> -                                     int dropped)
> +                                     int qos_drops)
    OVS_REQUIRES(dev->stats_lock)
>   {
> -    int i;
> -    unsigned int packet_size;
> +    struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
> +    struct netdev_stats *stats = &dev->stats;
>       struct dp_packet *packet;
> +    unsigned int packet_size;
> +    int i;
>   
>       stats->rx_packets += count;
> -    stats->rx_dropped += dropped;
> +    stats->rx_dropped += qos_drops;
>       for (i = 0; i < count; i++) {
>           packet = packets[i];
>           packet_size = dp_packet_size(packet);
> @@ -2201,6 +2203,8 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
>   
>           stats->rx_bytes += packet_size;
>       }
> +
> +    sw_stats->rx_qos_drops += qos_drops;
>   }
>   
>   /*
> @@ -2213,7 +2217,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
>       struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
>       struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
>       uint16_t nb_rx = 0;
> -    uint16_t dropped = 0;
> +    uint16_t qos_drops = 0;
>       int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
>       int vid = netdev_dpdk_get_vid(dev);
>   
> @@ -2240,17 +2244,16 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
>       }
>   
>       if (policer) {
> -        dropped = nb_rx;
> +        qos_drops = nb_rx;
>           nb_rx = ingress_policer_run(policer,
>                                       (struct rte_mbuf **) batch->packets,
>                                       nb_rx, true);
> -        dropped -= nb_rx;
> +        qos_drops -= nb_rx;
>       }
>   
>       rte_spinlock_lock(&dev->stats_lock);
> -    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
> -                                         nb_rx, dropped);
> -    dev->sw_stats->rx_qos_drops += dropped;
> +    netdev_dpdk_vhost_update_rx_counters(dev, batch->packets,
> +                                         nb_rx, qos_drops);
>       rte_spinlock_unlock(&dev->stats_lock);
>   
>       batch->count = nb_rx;
> @@ -2360,13 +2363,18 @@ netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
>   }
>   
>   static inline void
> -netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
> +netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
>                                        struct dp_packet **packets,
>                                        int attempted,
> -                                     int dropped)
> +                                     struct netdev_dpdk_sw_stats *sw_stats_add)
    OVS_REQUIRES(dev->stats_lock)
>   {
> -    int i;
> +    struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
> +    int dropped = sw_stats_add->tx_mtu_exceeded_drops +
> +                  sw_stats_add->tx_qos_drops +
> +                  sw_stats_add->tx_failure_drops;
> +    struct netdev_stats *stats = &dev->stats;
>       int sent = attempted - dropped;
> +    int i;
>   
>       stats->tx_packets += sent;
>       stats->tx_dropped += dropped;
> @@ -2374,6 +2382,11 @@ netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
>       for (i = 0; i < sent; i++) {
>           stats->tx_bytes += dp_packet_size(packets[i]);
>       }
> +
> +    sw_stats->tx_retries            += sw_stats_add->tx_retries;
> +    sw_stats->tx_failure_drops      += sw_stats_add->tx_failure_drops;
> +    sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops;
> +    sw_stats->tx_qos_drops          += sw_stats_add->tx_qos_drops;
>   }
>   
>   static void
> @@ -2382,12 +2395,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>   {
>       struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>       struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
> -    struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
> -    unsigned int total_pkts = cnt;
> -    unsigned int dropped = 0;
> -    unsigned int tx_failure;
> -    unsigned int mtu_drops;
> -    unsigned int qos_drops;
> +    struct netdev_dpdk_sw_stats sw_stats_add;
> +    unsigned int n_packets_to_free = cnt;
> +    unsigned int total_packets = cnt;
>       int i, retries = 0;
>       int max_retries = VHOST_ENQ_RETRY_MIN;
>       int vid = netdev_dpdk_get_vid(dev);
> @@ -2408,12 +2418,14 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>       }
>   
>       cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
> -    mtu_drops = total_pkts - cnt;
> -    qos_drops = cnt;
> +    sw_stats_add.tx_mtu_exceeded_drops = total_packets - cnt;
> +
>       /* Check has QoS has been configured for the netdev */
> +    sw_stats_add.tx_qos_drops = cnt;
>       cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
> -    qos_drops -= cnt;
> -    dropped = qos_drops + mtu_drops;
> +    sw_stats_add.tx_qos_drops -= cnt;
> +
> +    n_packets_to_free = cnt;
>   
>       do {
>           int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
> @@ -2438,20 +2450,18 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>           }
>       } while (cnt && (retries++ < max_retries));
>   
> -    tx_failure = cnt;
>       rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
>   
> +    sw_stats_add.tx_failure_drops = cnt;
> +    sw_stats_add.tx_retries = MIN(retries, max_retries);
> +
>       rte_spinlock_lock(&dev->stats_lock);
> -    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
> -                                         cnt + dropped);
> -    sw_stats->tx_retries += MIN(retries, max_retries);
> -    sw_stats->tx_failure_drops += tx_failure;
> -    sw_stats->tx_mtu_exceeded_drops += mtu_drops;
> -    sw_stats->tx_qos_drops += qos_drops;
> +    netdev_dpdk_vhost_update_tx_counters(dev, pkts, total_packets,
> +                                         &sw_stats_add);
>       rte_spinlock_unlock(&dev->stats_lock);
>   
>   out:
> -    for (i = 0; i < total_pkts - dropped; i++) {
> +    for (i = 0; i < n_packets_to_free; i++) {
>           dp_packet_delete(pkts[i]);
>       }
>   }
> ---
> 
> Best regards, Ilya Maximets.
> 

_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to