Thanks Kevin

Darrell


On 8/24/17, 4:31 PM, "Kevin Traynor" <ktray...@redhat.com> wrote:

    Darrell, thank you for the very thorough review. All comments look
    fine. Brief replies inline.
    
    thanks,
    Kevin.
    
    On 08/24/2017 09:34 AM, Darrell Ball wrote:
    > 
    >     
    >     On 8/23/17, 6:34 AM, "Kevin Traynor" <ktray...@redhat.com> wrote:
    >     
    >         Add counters to dp_netdev_rxq which will later be used for
    >         storing the processing cycles of an rxq. Processing cycles will
    >         be stored in reference to a defined time interval. We will store
    >         the cycles of the current in-progress interval, a number of
    >         completed intervals and the sum of the completed intervals.
    >         
    >         cycles_count_intermediate was used to count cycles for a pmd.
    >         With some small additions we can also use it to count the cycles
    >         used for processing an rxq.
    >         
    >         Signed-off-by: Kevin Traynor <ktray...@redhat.com>
    >         ---
    >          lib/dpif-netdev.c | 28 +++++++++++++++++++++++++---
    >          1 file changed, 25 insertions(+), 3 deletions(-)
    >         
    >         diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
    >         index f35c079..51d4050 100644
    >         --- a/lib/dpif-netdev.c
    >         +++ b/lib/dpif-netdev.c
    >         @@ -182,4 +182,8 @@ struct emc_cache {
    >          #define DPCLS_OPTIMIZATION_INTERVAL 1000
    >          
    >         +/* Number of intervals for which cycles are stored
    >         + * and used during rxq to pmd assignment. */
    >         +#define PMD_RXQ_INTERVAL_MAX 6
    >         +
    >          struct dpcls {
    >              struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
    >         @@ -340,4 +344,13 @@ enum pmd_cycles_counter_type {
    >          };
    >          
    >         +enum rxq_cycles_counter_type {
    >         +    RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
    >         +                                   processing packets during the current
    >         +                                   interval. */
    >         +    RXQ_CYCLES_PROC_HIST,       /* Total cycles of all intervals that are used
    >         +                                   during rxq to pmd assignment. */
    >         +    RXQ_N_CYCLES
    >         +};
    >         +
    >          #define XPS_TIMEOUT_MS 500LL
    >          
    >         @@ -351,4 +364,9 @@ struct dp_netdev_rxq {
    >                                                    particular core. */
    >              struct dp_netdev_pmd_thread *pmd;  /* pmd thread that polls this queue. */
    >         +
    >         +    /* Counters of cycles spent polling and processing packets. */
    > 
    > Do we need to specify ‘successfully’ polling to avoid confusion ?
    > 
    
    Makes sense, added.
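    
    Something like this for the updated comment (exact v2 wording may
    differ):
    
        /* Counters of cycles spent successfully polling and processing
         * packets. */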
    
    >         +    atomic_ullong cycles[RXQ_N_CYCLES];
    > 
    > Do you think below deserves a separate description – ‘we store the last
    > PMD_RXQ_INTERVAL_MAX intervals and sum them up to yield the cycles used
    > for a given rxq…’ ?
    > 
    
    Yes, I added it in the comment.
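    
    For illustration, summing the stored intervals during assignment could
    look roughly like this (a sketch only; the helper name here is
    hypothetical, and the real code lands in a later patch):
    
        /* Sum the last PMD_RXQ_INTERVAL_MAX completed intervals to get
         * the processing cycles used when assigning 'rxq' to a pmd. */
        static uint64_t
        rxq_cycles_sum_intervals(struct dp_netdev_rxq *rxq)
        {
            uint64_t total = 0;
    
            for (int i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
                unsigned long long cycles;
    
                /* Each slot holds the cycles of one completed interval. */
                atomic_read_relaxed(&rxq->cycles_intrvl[i], &cycles);
                total += cycles;
            }
            return total;
        }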
    
    >         +    atomic_ullong cycles_intrvl[PMD_RXQ_INTERVAL_MAX];
    >         +    unsigned intrvl_idx;               /* Write index for 'cycles_intrvl'. */
    >          };
    >          
    >         @@ -677,5 +695,4 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
    >          static inline void
    >          dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
    >         -
    >          static void
    >          dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
    >         @@ -3092,4 +3109,5 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
    >          static inline void
    >          cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
    >         +                          struct dp_netdev_rxq *rxq,
    >                                    enum pmd_cycles_counter_type type)
    >              OVS_NO_THREAD_SAFETY_ANALYSIS
    >         @@ -3100,4 +3118,8 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
    >          
    >              non_atomic_ullong_add(&pmd->cycles.n[type], interval);
    >         +    if (rxq && (type == PMD_CYCLES_PROCESSING)) {
    >         +        /* Add to the amount of current processing cycles. */
    >         +        non_atomic_ullong_add(&rxq->cycles[RXQ_CYCLES_PROC_CURR], interval);
    >         +    }
    >          }
    >          
    >         @@ -3668,5 +3690,5 @@ dpif_netdev_run(struct dpif *dpif)
    >                                                            port->rxqs[i].rx,
    >                                                            port->port_no);
    >         -                    cycles_count_intermediate(non_pmd, process_packets ?
    >         +                    cycles_count_intermediate(non_pmd, NULL, process_packets ?
    >                                                                 PMD_CYCLES_PROCESSING
    >                                                               : PMD_CYCLES_IDLE);
    >         @@ -3855,5 +3877,5 @@ reload:
    >                          dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
    >                                                     poll_list[i].port_no);
    >         -            cycles_count_intermediate(pmd,
    >         +            cycles_count_intermediate(pmd, NULL,
    >                                                process_packets ? PMD_CYCLES_PROCESSING
    >                                                                : PMD_CYCLES_IDLE);
    >         -- 
    >         1.8.3.1