On Thu, 2021-10-21 at 03:24 +0000, Li, Xiaoyun wrote:
> Hi
> 
> > -----Original Message-----
> > From: Xueming Li <xuemi...@nvidia.com>
> > Sent: Wednesday, October 20, 2021 15:53
> > To: dev@dpdk.org; Zhang, Yuying <yuying.zh...@intel.com>
> > Cc: xuemi...@nvidia.com; Jerin Jacob <jerinjac...@gmail.com>; Yigit, Ferruh
> > <ferruh.yi...@intel.com>; Andrew Rybchenko
> > <andrew.rybche...@oktetlabs.ru>; Viacheslav Ovsiienko
> > <viachesl...@nvidia.com>; Thomas Monjalon <tho...@monjalon.net>; Lior
> > Margalit <lmarga...@nvidia.com>; Ananyev, Konstantin
> > <konstantin.anan...@intel.com>; Ajit Khaparde
> > <ajit.khapa...@broadcom.com>; Li, Xiaoyun <xiaoyun...@intel.com>
> > Subject: [PATCH v11 6/7] app/testpmd: force shared Rx queue polled on same
> > core
> > 
> > Shared Rx queue must be polled on same core. This patch checks and stops
> > forwarding if shared RxQ being scheduled on multiple cores.
> > 
> > It's suggested to use same number of Rx queues and polling cores.
> > 
> > Signed-off-by: Xueming Li <xuemi...@nvidia.com>
> > ---
> >  app/test-pmd/config.c  | 103
> > +++++++++++++++++++++++++++++++++++++++++
> >  app/test-pmd/testpmd.c |   4 +-
> >  app/test-pmd/testpmd.h |   2 +
> >  3 files changed, 108 insertions(+), 1 deletion(-)
> > 
> > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> > fa951a86704..1f1307178be 100644
> > --- a/app/test-pmd/config.c
> > +++ b/app/test-pmd/config.c
> > @@ -2915,6 +2915,109 @@ port_rss_hash_key_update(portid_t port_id, char
> > rss_type[], uint8_t *hash_key,
> >     }
> >  }
> > 
> > +/*
> > + * Check whether a shared rxq scheduled on other lcores.
> > + */
> > +static bool
> > +fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
> > +                      portid_t src_port, queueid_t src_rxq,
> > +                      uint32_t share_group, queueid_t share_rxq) {
> > +   streamid_t sm_id;
> > +   streamid_t nb_fs_per_lcore;
> > +   lcoreid_t  nb_fc;
> > +   lcoreid_t  lc_id;
> > +   struct fwd_stream *fs;
> > +   struct rte_port *port;
> > +   struct rte_eth_dev_info *dev_info;
> > +   struct rte_eth_rxconf *rxq_conf;
> > +
> > +   nb_fc = cur_fwd_config.nb_fwd_lcores;
> > +   /* Check remaining cores. */
> > +   for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
> > +           sm_id = fwd_lcores[lc_id]->stream_idx;
> > +           nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
> > +           for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
> > +                sm_id++) {
> > +                   fs = fwd_streams[sm_id];
> > +                   port = &ports[fs->rx_port];
> > +                   dev_info = &port->dev_info;
> > +                   rxq_conf = &port->rx_conf[fs->rx_queue];
> > +                   if ((dev_info->dev_capa &
> > RTE_ETH_DEV_CAPA_RXQ_SHARE)
> > +                       == 0)
> > +                           /* Not shared rxq. */
> > +                           continue;
> > +                   if (domain_id != port->dev_info.switch_info.domain_id)
> > +                           continue;
> > +                   if (rxq_conf->share_group != share_group)
> > +                           continue;
> > +                   if (rxq_conf->share_qid != share_rxq)
> > +                           continue;
> > +                   printf("Shared Rx queue group %u queue %hu can't be
> > scheduled on different cores:\n",
> > +                          share_group, share_rxq);
> > +                   printf("  lcore %hhu Port %hu queue %hu\n",
> > +                          src_lc, src_port, src_rxq);
> > +                   printf("  lcore %hhu Port %hu queue %hu\n",
> > +                          lc_id, fs->rx_port, fs->rx_queue);
> > +                   printf("Please use --nb-cores=%hu to limit number of
> > forwarding cores\n",
> > +                          nb_rxq);
> > +                   return true;
> > +           }
> > +   }
> > +   return false;
> > +}
> > +
> > +/*
> > + * Check shared rxq configuration.
> > + *
> > + * Shared group must not being scheduled on different core.
> > + */
> > +bool
> > +pkt_fwd_shared_rxq_check(void)
> > +{
> > +   streamid_t sm_id;
> > +   streamid_t nb_fs_per_lcore;
> > +   lcoreid_t  nb_fc;
> > +   lcoreid_t  lc_id;
> > +   struct fwd_stream *fs;
> > +   uint16_t domain_id;
> > +   struct rte_port *port;
> > +   struct rte_eth_dev_info *dev_info;
> > +   struct rte_eth_rxconf *rxq_conf;
> > +
> > +   nb_fc = cur_fwd_config.nb_fwd_lcores;
> > +   /*
> > +    * Check streams on each core, make sure the same switch domain +
> > +    * group + queue doesn't get scheduled on other cores.
> > +    */
> > +   for (lc_id = 0; lc_id < nb_fc; lc_id++) {
> > +           sm_id = fwd_lcores[lc_id]->stream_idx;
> > +           nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
> > +           for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
> > +                sm_id++) {
> > +                   fs = fwd_streams[sm_id];
> > +                   /* Update lcore info stream being scheduled. */
> > +                   fs->lcore = fwd_lcores[lc_id];
> > +                   port = &ports[fs->rx_port];
> > +                   dev_info = &port->dev_info;
> > +                   rxq_conf = &port->rx_conf[fs->rx_queue];
> > +                   if ((dev_info->dev_capa &
> > RTE_ETH_DEV_CAPA_RXQ_SHARE)
> > +                       == 0)
> > +                           /* Not shared rxq. */
> > +                           continue;
> > +                   /* Check shared rxq not scheduled on remaining cores.
> 
> The check will be done anyway as long as the dev has the share_rxq capability.
> But what if the user wants a normal queue config while using a dev which
> has the share_rxq capability?

Good catch, thanks!

> You should limit the check only when "rxq_share > 0".

Yes, will add this at top of this function.

> 
> > */
> > +                   domain_id = port->dev_info.switch_info.domain_id;
> > +                   if (fwd_stream_on_other_lcores(domain_id, lc_id,
> > +                                                  fs->rx_port,
> > +                                                  fs->rx_queue,
> > +                                                  rxq_conf->share_group,
> > +                                                  rxq_conf->share_qid))
> > +                           return false;
> > +           }
> > +   }
> > +   return true;
> > +}
> > +
> >  /*
> >   * Setup forwarding configuration for each logical core.
> >   */
> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > 123142ed110..f3f81ef561f 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -2236,10 +2236,12 @@ start_packet_forwarding(int with_tx_first)
> > 
> >     fwd_config_setup();
> > 
> > +   pkt_fwd_config_display(&cur_fwd_config);
> > +   if (!pkt_fwd_shared_rxq_check())
> 
> Same comment as above
> This check should only happens if user enables "--rxq-share=[X]".
> You can limit the check here too.
> If (rxq_share > 0 && !pkt_fwd_shared_rxq_check())

I will add the rxq_share > 0 check at the beginning of
pkt_fwd_shared_rxq_check(), thanks!

> 
> > +           return;
> >     if(!no_flush_rx)
> >             flush_fwd_rx_queues();
> > 
> > -   pkt_fwd_config_display(&cur_fwd_config);
> >     rxtx_config_display();
> > 
> >     fwd_stats_reset();
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > 3dfaaad94c0..f121a2da90c 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -144,6 +144,7 @@ struct fwd_stream {
> >     uint64_t     core_cycles; /**< used for RX and TX processing */
> >     struct pkt_burst_stats rx_burst_stats;
> >     struct pkt_burst_stats tx_burst_stats;
> > +   struct fwd_lcore *lcore; /**< Lcore being scheduled. */
> >  };
> > 
> >  /**
> > @@ -795,6 +796,7 @@ void port_summary_header_display(void);
> >  void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);  void
> > tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);  void
> > fwd_lcores_config_display(void);
> > +bool pkt_fwd_shared_rxq_check(void);
> >  void pkt_fwd_config_display(struct fwd_config *cfg);  void
> > rxtx_config_display(void);  void fwd_config_setup(void);
> > --
> > 2.33.0
> 

Reply via email to