Hi Ilya,

I fully agree with storing 'now' as part of the pmd struct instead of passing 
it around as function arguments.
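
Just so we are talking about the same pattern: instead of sampling
time_msec() once per receive cycle and threading the value through every
call, the time is sampled into the thread-local context and read wherever
it is needed. A minimal sketch, using names from your patch (call sites
simplified):

    /* Before: 'now' is sampled once and passed down explicitly. */
    long long now = time_msec();
    dp_netdev_execute_actions(pmd, &batch, true, flow,
                              actions, actions_len, now);

    /* After: 'now' lives in the PMD thread context. */
    pmd_thread_ctx_time_update(pmd);   /* pmd->ctx.now = time_msec(); */
    dp_netdev_execute_actions(pmd, &batch, true, flow,
                              actions, actions_len);
    /* Any function deeper in the datapath now reads pmd->ctx.now. */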

For me, struct dp_netdev_pmd_thread is *the* PMD thread context in
dpif-netdev. I don't really see the benefit of creating a sub-struct
dp_netdev_pmd_thread_ctx and moving certain data into it. Can you explain
your motivation?
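
To make the question concrete, here is a minimal sketch of the two layouts
(the _flat/_nested suffixes are mine, purely for illustration):

    /* Flat members, as dpif-netdev has today: */
    struct dp_netdev_pmd_thread_flat {
        long long now;                  /* Latest measured time. */
        unsigned long long last_cycles; /* Cycle counter snapshot. */
        /* ... remaining members ... */
    };

    /* Nested context, as introduced by the patch: */
    struct dp_netdev_pmd_thread_ctx {
        long long now;
        unsigned long long last_cycles;
    };

    struct dp_netdev_pmd_thread_nested {
        struct dp_netdev_pmd_thread_ctx ctx;
        /* ... remaining members ... */
    };

Accesses change from pmd->now to pmd->ctx.now, but both variants carry the
same data with the same per-thread lifetime, hence the question.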

BR, Jan

> -----Original Message-----
> From: [email protected] 
> [mailto:[email protected]] On Behalf Of Ilya Maximets
> Sent: Thursday, 10 August, 2017 18:55
> To: [email protected]
> Cc: Ilya Maximets <[email protected]>; Heetae Ahn 
> <[email protected]>
> Subject: [ovs-dev] [PATCH 1/2] dpif-netdev: Keep latest measured time for PMD 
> thread.
> 
> In the current implementation, the 'now' variable is updated once on
> each receive cycle and passed through the whole datapath via function
> arguments. It is better to keep this variable inside the PMD thread
> structure so that it can be obtained at any time. This saves stack
> memory and simplifies future modifications to the datapath logic.
> 
> This patch introduces a new structure, 'dp_netdev_pmd_thread_ctx',
> contained by 'struct dp_netdev_pmd_thread', to store the processing
> context of the PMD thread. For now, only the time and cycle counters
> are moved into it; it can be extended in the future.
> 
> Signed-off-by: Ilya Maximets <[email protected]>
> ---
> 
> This will be used in the next patch to avoid passing the probability
> through the whole datapath.
> 
>  lib/dpif-netdev.c | 117 ++++++++++++++++++++++++++++++------------------------
>  1 file changed, 65 insertions(+), 52 deletions(-)
> 
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index e2cd931..6d42393 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -504,6 +504,16 @@ struct tx_port {
>      struct hmap_node node;
>  };
> 
> +/* Contained by struct dp_netdev_pmd_thread's 'ctx' member. */
> +struct dp_netdev_pmd_thread_ctx {
> +    /* Latest measured time. */
> +    long long now;
> +    /* Used to count cycles. See 'cycles_count_end()' */
> +    unsigned long long last_cycles;
> +};
> +
> +static void pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *);
> +
>  /* PMD: Poll modes drivers.  PMD accesses devices via polling to eliminate
>   * the performance overhead of interrupt processing.  Therefore netdev can
>   * not implement rx-wait for these devices.  dpif-netdev needs to poll
> @@ -556,8 +566,8 @@ struct dp_netdev_pmd_thread {
>      /* Cycles counters */
>      struct dp_netdev_pmd_cycles cycles;
> 
> -    /* Used to count cicles. See 'cycles_counter_end()' */
> -    unsigned long long last_cycles;
> +    /* Current context of the PMD thread. */
> +    struct dp_netdev_pmd_thread_ctx ctx;
> 
>      struct latch exit_latch;        /* For terminating the pmd thread. */
>      struct seq *reload_seq;
> @@ -630,8 +640,7 @@ static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
>                                        struct dp_packet_batch *,
>                                        bool may_steal, const struct flow *flow,
>                                        const struct nlattr *actions,
> -                                      size_t actions_len,
> -                                      long long now);
> +                                      size_t actions_len);
>  static void dp_netdev_input(struct dp_netdev_pmd_thread *,
>                              struct dp_packet_batch *, odp_port_t port_no);
>  static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
> @@ -679,9 +688,9 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
> 
>  static void
>  dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
> -                               long long now, bool purge);
> +                               bool purge);
>  static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
> -                                      struct tx_port *tx, long long now);
> +                                      struct tx_port *tx);
> 
>  static inline bool emc_entry_alive(struct emc_entry *ce);
>  static void emc_clear_entry(struct emc_entry *ce);
> @@ -723,6 +732,12 @@ emc_cache_slow_sweep(struct emc_cache *flow_cache)
>      flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
>  }
> 
> +static inline void
> +pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
> +{
> +    pmd->ctx.now = time_msec();
> +}
> +
>  /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
>  bool
>  dpif_is_netdev(const struct dpif *dpif)
> @@ -2839,6 +2854,9 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
>          ovs_mutex_lock(&dp->non_pmd_mutex);
>      }
> 
> +    /* Update current time in PMD context. */
> +    pmd_thread_ctx_time_update(pmd);
> +
>      /* The action processing expects the RSS hash to be valid, because
>       * it's always initialized at the beginning of datapath processing.
>       * In this case, though, 'execute->packet' may not have gone through
> @@ -2851,8 +2869,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
> 
>      dp_packet_batch_init_packet(&pp, execute->packet);
>      dp_netdev_execute_actions(pmd, &pp, false, execute->flow,
> -                              execute->actions, execute->actions_len,
> -                              time_msec());
> +                              execute->actions, execute->actions_len);
> 
>      if (pmd->core_id == NON_PMD_CORE_ID) {
>          ovs_mutex_unlock(&dp->non_pmd_mutex);
> @@ -3073,7 +3090,7 @@ cycles_count_start(struct dp_netdev_pmd_thread *pmd)
>      OVS_ACQUIRES(&cycles_counter_fake_mutex)
>      OVS_NO_THREAD_SAFETY_ANALYSIS
>  {
> -    pmd->last_cycles = cycles_counter();
> +    pmd->ctx.last_cycles = cycles_counter();
>  }
> 
>  /* Stop counting cycles and add them to the counter 'type' */
> @@ -3083,7 +3100,7 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
>      OVS_RELEASES(&cycles_counter_fake_mutex)
>      OVS_NO_THREAD_SAFETY_ANALYSIS
>  {
> -    unsigned long long interval = cycles_counter() - pmd->last_cycles;
> +    unsigned long long interval = cycles_counter() - pmd->ctx.last_cycles;
> 
>      non_atomic_ullong_add(&pmd->cycles.n[type], interval);
>  }
> @@ -3095,8 +3112,8 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
>      OVS_NO_THREAD_SAFETY_ANALYSIS
>  {
>      unsigned long long new_cycles = cycles_counter();
> -    unsigned long long interval = new_cycles - pmd->last_cycles;
> -    pmd->last_cycles = new_cycles;
> +    unsigned long long interval = new_cycles - pmd->ctx.last_cycles;
> +    pmd->ctx.last_cycles = new_cycles;
> 
>      non_atomic_ullong_add(&pmd->cycles.n[type], interval);
>  }
> @@ -3674,7 +3691,8 @@ dpif_netdev_run(struct dpif *dpif)
>              }
>          }
>          cycles_count_end(non_pmd, PMD_CYCLES_IDLE);
> -        dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
> +        pmd_thread_ctx_time_update(non_pmd);
> +        dpif_netdev_xps_revalidate_pmd(non_pmd, false);
>          ovs_mutex_unlock(&dp->non_pmd_mutex);
> 
>          dp_netdev_pmd_unref(non_pmd);
> @@ -3725,7 +3743,7 @@ pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd)
>      struct tx_port *tx_port_cached;
> 
>      /* Free all used tx queue ids. */
> -    dpif_netdev_xps_revalidate_pmd(pmd, 0, true);
> +    dpif_netdev_xps_revalidate_pmd(pmd, true);
> 
>      HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) {
>          free(tx_port_cached);
> @@ -4308,7 +4326,8 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
>      ovs_mutex_init(&pmd->port_mutex);
>      cmap_init(&pmd->flow_table);
>      cmap_init(&pmd->classifiers);
> -    pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
> +    pmd_thread_ctx_time_update(pmd);
> +    pmd->next_optimization = pmd->ctx.now + DPCLS_OPTIMIZATION_INTERVAL;
>      hmap_init(&pmd->poll_list);
>      hmap_init(&pmd->tx_ports);
>      hmap_init(&pmd->tnl_port_cache);
> @@ -4621,19 +4640,18 @@ packet_batch_per_flow_init(struct packet_batch_per_flow *batch,
> 
>  static inline void
>  packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
> -                              struct dp_netdev_pmd_thread *pmd,
> -                              long long now)
> +                              struct dp_netdev_pmd_thread *pmd)
>  {
>      struct dp_netdev_actions *actions;
>      struct dp_netdev_flow *flow = batch->flow;
> 
>      dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
> -                        batch->tcp_flags, now);
> +                        batch->tcp_flags, pmd->ctx.now);
> 
>      actions = dp_netdev_flow_get_actions(flow);
> 
>      dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow,
> -                              actions->actions, actions->size, now);
> +                              actions->actions, actions->size);
>  }
> 
>  static inline void
> @@ -4730,7 +4748,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
>                       struct dp_packet *packet,
>                       const struct netdev_flow_key *key,
>                       struct ofpbuf *actions, struct ofpbuf *put_actions,
> -                     int *lost_cnt, long long now)
> +                     int *lost_cnt)
>  {
>      struct ofpbuf *add_actions;
>      struct dp_packet_batch b;
> @@ -4769,7 +4787,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
>       * we'll send the packet up twice. */
>      dp_packet_batch_init_packet(&b, packet);
>      dp_netdev_execute_actions(pmd, &b, true, &match.flow,
> -                              actions->data, actions->size, now);
> +                              actions->data, actions->size);
> 
>      add_actions = put_actions->size ? put_actions : actions;
>      if (OVS_LIKELY(error != ENOSPC)) {
> @@ -4797,9 +4815,9 @@ static inline void
>  fast_path_processing(struct dp_netdev_pmd_thread *pmd,
>                       struct dp_packet_batch *packets_,
>                       struct netdev_flow_key *keys,
> -                     struct packet_batch_per_flow batches[], size_t *n_batches,
> -                     odp_port_t in_port,
> -                     long long now)
> +                     struct packet_batch_per_flow batches[],
> +                     size_t *n_batches,
> +                     odp_port_t in_port)
>  {
>      int cnt = packets_->count;
>  #if !defined(__CHECKER__) && !defined(_WIN32)
> @@ -4856,7 +4874,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
> 
>              miss_cnt++;
>              handle_packet_upcall(pmd, packets[i], &keys[i], &actions,
> -                                 &put_actions, &lost_cnt, now);
> +                                 &put_actions, &lost_cnt);
>          }
> 
>          ofpbuf_uninit(&actions);
> @@ -4913,18 +4931,19 @@ dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
>      OVS_ALIGNED_VAR(CACHE_LINE_SIZE)
>          struct netdev_flow_key keys[PKT_ARRAY_SIZE];
>      struct packet_batch_per_flow batches[PKT_ARRAY_SIZE];
> -    long long now = time_msec();
>      size_t n_batches;
>      odp_port_t in_port;
> 
> +    pmd_thread_ctx_time_update(pmd);
> +
>      n_batches = 0;
>      emc_processing(pmd, packets, keys, batches, &n_batches,
>                              md_is_valid, port_no);
>      if (!dp_packet_batch_is_empty(packets)) {
>          /* Get ingress port from first packet's metadata. */
>          in_port = packets->packets[0]->md.in_port.odp_port;
> -        fast_path_processing(pmd, packets, keys, batches, &n_batches,
> -                             in_port, now);
> +        fast_path_processing(pmd, packets, keys,
> +                             batches, &n_batches, in_port);
>      }
> 
>      /* All the flow batches need to be reset before any call to
> @@ -4942,7 +4961,7 @@ dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
>      }
> 
>      for (i = 0; i < n_batches; i++) {
> -        packet_batch_per_flow_execute(&batches[i], pmd, now);
> +        packet_batch_per_flow_execute(&batches[i], pmd);
>      }
>  }
> 
> @@ -4963,7 +4982,6 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
> 
>  struct dp_netdev_execute_aux {
>      struct dp_netdev_pmd_thread *pmd;
> -    long long now;
>      const struct flow *flow;
>  };
> 
> @@ -4987,7 +5005,7 @@ dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
> 
>  static void
>  dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
> -                               long long now, bool purge)
> +                               bool purge)
>  {
>      struct tx_port *tx;
>      struct dp_netdev_port *port;
> @@ -4997,7 +5015,7 @@ dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
>          if (!tx->port->dynamic_txqs) {
>              continue;
>          }
> -        interval = now - tx->last_used;
> +        interval = pmd->ctx.now - tx->last_used;
>          if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
>              port = tx->port;
>              ovs_mutex_lock(&port->txq_used_mutex);
> @@ -5010,18 +5028,14 @@ dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
> 
>  static int
>  dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
> -                           struct tx_port *tx, long long now)
> +                           struct tx_port *tx)
>  {
>      struct dp_netdev_port *port;
>      long long interval;
>      int i, min_cnt, min_qid;
> 
> -    if (OVS_UNLIKELY(!now)) {
> -        now = time_msec();
> -    }
> -
> -    interval = now - tx->last_used;
> -    tx->last_used = now;
> +    interval = pmd->ctx.now - tx->last_used;
> +    tx->last_used = pmd->ctx.now;
> 
>      if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
>          return tx->qid;
> @@ -5049,7 +5063,7 @@ dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd,
> 
>      ovs_mutex_unlock(&port->txq_used_mutex);
> 
> -    dpif_netdev_xps_revalidate_pmd(pmd, now, false);
> +    dpif_netdev_xps_revalidate_pmd(pmd, false);
> 
>      VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.",
>               pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev));
> @@ -5100,7 +5114,7 @@ dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
>                              struct dp_packet *packet, bool may_steal,
>                              struct flow *flow, ovs_u128 *ufid,
>                              struct ofpbuf *actions,
> -                            const struct nlattr *userdata, long long now)
> +                            const struct nlattr *userdata)
>  {
>      struct dp_packet_batch b;
>      int error;
> @@ -5113,7 +5127,7 @@ dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd,
>      if (!error || error == ENOSPC) {
>          dp_packet_batch_init_packet(&b, packet);
>          dp_netdev_execute_actions(pmd, &b, may_steal, flow,
> -                                  actions->data, actions->size, now);
> +                                  actions->data, actions->size);
>      } else if (may_steal) {
>          dp_packet_delete(packet);
>      }
> @@ -5129,7 +5143,6 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
>      struct dp_netdev_pmd_thread *pmd = aux->pmd;
>      struct dp_netdev *dp = pmd->dp;
>      int type = nl_attr_type(a);
> -    long long now = aux->now;
>      struct tx_port *p;
> 
>      switch ((enum ovs_action_attr)type) {
> @@ -5141,7 +5154,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
> 
>              dynamic_txqs = p->port->dynamic_txqs;
>              if (dynamic_txqs) {
> -                tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now);
> +                tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p);
>              } else {
>                  tx_qid = pmd->static_tx_qid;
>              }
> @@ -5224,7 +5237,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
>                  flow_extract(packet, &flow);
>                  dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
>                  dp_execute_userspace_action(pmd, packet, may_steal, &flow,
> -                                            &ufid, &actions, userdata, now);
> +                                            &ufid, &actions, userdata);
>              }
> 
>              if (clone) {
> @@ -5391,7 +5404,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_,
> 
>      case OVS_ACTION_ATTR_METER:
>          dp_netdev_run_meter(pmd->dp, packets_, nl_attr_get_u32(a),
> -                            time_msec());
> +                            pmd->ctx.now);
>          break;
> 
>      case OVS_ACTION_ATTR_PUSH_VLAN:
> @@ -5420,10 +5433,9 @@ static void
>  dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
>                            struct dp_packet_batch *packets,
>                            bool may_steal, const struct flow *flow,
> -                          const struct nlattr *actions, size_t actions_len,
> -                          long long now)
> +                          const struct nlattr *actions, size_t actions_len)
>  {
> -    struct dp_netdev_execute_aux aux = { pmd, now, flow };
> +    struct dp_netdev_execute_aux aux = { pmd, flow };
> 
>      odp_execute_actions(&aux, packets, may_steal, actions,
>                          actions_len, dp_execute_cb);
> @@ -5749,9 +5761,9 @@ static inline void
>  dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
>  {
>      struct dpcls *cls;
> -    long long int now = time_msec();
> 
> -    if (now > pmd->next_optimization) {
> +    pmd_thread_ctx_time_update(pmd);
> +    if (pmd->ctx.now > pmd->next_optimization) {
>          /* Try to obtain the flow lock to block out revalidator threads.
>           * If not possible, just try next time. */
>          if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
> @@ -5761,7 +5773,8 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
>              }
>              ovs_mutex_unlock(&pmd->flow_mutex);
>              /* Start new measuring interval */
> -            pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
> +            pmd->next_optimization = pmd->ctx.now
> +                                     + DPCLS_OPTIMIZATION_INTERVAL;
>          }
>      }
>  }
> --
> 2.7.4
> 
> _______________________________________________
> dev mailing list
> [email protected]
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev