Re: [ovs-dev] [RFC v3 PATCH 3/5] dpif-netdev: Skip encap action during datapath execution

2020-06-29 Thread Sriharsha Basavapatna via dev
On Tue, Jun 2, 2020 at 11:47 AM Eli Britstein  wrote:
>
>
> On 6/1/2020 8:29 PM, Sriharsha Basavapatna wrote:
> > On Mon, Jun 1, 2020 at 9:18 PM Eli Britstein  wrote:
> >>
> >> On 6/1/2020 6:15 PM, Sriharsha Basavapatna wrote:
> >>> On Mon, Jun 1, 2020 at 7:58 PM Eli Britstein  wrote:
>  On 5/28/2020 11:19 AM, Sriharsha Basavapatna wrote:
> > In this patch we check if action processing (apart from OUTPUT action)
> > should be skipped for a given dp_netdev_flow. Specifically, we check if
> > the action is TNL_PUSH and if it has been offloaded to HW, then we do 
> > not
> > push the tunnel header in SW. The datapath only executes the OUTPUT 
> > action.
> > The packet will be encapsulated in HW during transmit.
> >
> > Signed-off-by: Sriharsha Basavapatna 
> > 
> > ---
> > lib/dpif-netdev.c | 46 
> > --
> > 1 file changed, 36 insertions(+), 10 deletions(-)
> >
> > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> > index 781b233f4..3e175c25e 100644
> > --- a/lib/dpif-netdev.c
> > +++ b/lib/dpif-netdev.c
> > @@ -112,6 +112,7 @@ COVERAGE_DEFINE(datapath_drop_recirc_error);
> > COVERAGE_DEFINE(datapath_drop_invalid_port);
> > COVERAGE_DEFINE(datapath_drop_invalid_tnl_port);
> > COVERAGE_DEFINE(datapath_drop_rx_invalid_packet);
> > +COVERAGE_DEFINE(datapath_skip_tunnel_push);
> >
> > /* Protects against changes to 'dp_netdevs'. */
> > static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
> > @@ -538,6 +539,16 @@ struct dp_netdev_flow {
> > bool dead;
> > uint32_t mark;   /* Unique flow mark assigned to a 
> > flow */
> >
> > +/* The next two members are used to support partial offloading of
> > + * actions. The boolean flag tells if this flow has its actions 
> > partially
> > + * offloaded. The egress port# tells if the action should be 
> > offloaded
> > + * on the egress (output) port instead of the in-port for the 
> > flow. Note
> > + * that we support flows with a single egress port action.
> > + * (see MAX_ACTION_ATTRS for related comments).
> > + */
> > +bool partial_actions_offloaded;
> > +odp_port_t  egress_offload_port;
> > +
> > /* Statistics. */
> > struct dp_netdev_flow_stats stats;
> >
> > @@ -791,7 +802,8 @@ static void dp_netdev_execute_actions(struct 
> > dp_netdev_pmd_thread *pmd,
> >   bool should_steal,
> >   const struct flow *flow,
> >   const struct nlattr *actions,
> > -  size_t actions_len);
> > +  size_t actions_len,
> > +  const struct dp_netdev_flow 
> > *dp_flow);
> > static void dp_netdev_input(struct dp_netdev_pmd_thread *,
> > struct dp_packet_batch *, odp_port_t 
> > port_no);
> > static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
> > @@ -3828,7 +3840,7 @@ dpif_netdev_execute(struct dpif *dpif, struct 
> > dpif_execute *execute)
> > dp_packet_batch_init_packet(, execute->packet);
> > pp.do_not_steal = true;
> > dp_netdev_execute_actions(pmd, , false, execute->flow,
> > -  execute->actions, execute->actions_len);
> > +  execute->actions, execute->actions_len, 
> > NULL);
> > dp_netdev_pmd_flush_output_packets(pmd, true);
> >
> > if (pmd->core_id == NON_PMD_CORE_ID) {
> > @@ -6456,7 +6468,7 @@ packet_batch_per_flow_execute(struct 
> > packet_batch_per_flow *batch,
> > actions = dp_netdev_flow_get_actions(flow);
> >
> > dp_netdev_execute_actions(pmd, >array, true, >flow,
> > -  actions->actions, actions->size);
> > +  actions->actions, actions->size, flow);
> > }
> >
> > static inline void
> > @@ -6764,7 +6776,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread 
> > *pmd,
> >  * we'll send the packet up twice. */
> > dp_packet_batch_init_packet(, packet);
> > dp_netdev_execute_actions(pmd, , true, ,
> > -  actions->data, actions->size);
> > +  actions->data, actions->size, NULL);
> >
> > add_actions = put_actions->size ? put_actions : actions;
> > if (OVS_LIKELY(error != ENOSPC)) {
> > @@ -6999,6 +7011,7 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread 
> > *pmd,
> > struct 

Re: [ovs-dev] [RFC v3 PATCH 3/5] dpif-netdev: Skip encap action during datapath execution

2020-06-02 Thread Eli Britstein



On 6/1/2020 8:29 PM, Sriharsha Basavapatna wrote:

On Mon, Jun 1, 2020 at 9:18 PM Eli Britstein  wrote:


On 6/1/2020 6:15 PM, Sriharsha Basavapatna wrote:

On Mon, Jun 1, 2020 at 7:58 PM Eli Britstein  wrote:

On 5/28/2020 11:19 AM, Sriharsha Basavapatna wrote:

In this patch we check if action processing (apart from OUTPUT action)
should be skipped for a given dp_netdev_flow. Specifically, we check if
the action is TNL_PUSH and if it has been offloaded to HW, then we do not
push the tunnel header in SW. The datapath only executes the OUTPUT action.
The packet will be encapsulated in HW during transmit.

Signed-off-by: Sriharsha Basavapatna 
---
lib/dpif-netdev.c | 46 --
1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 781b233f4..3e175c25e 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -112,6 +112,7 @@ COVERAGE_DEFINE(datapath_drop_recirc_error);
COVERAGE_DEFINE(datapath_drop_invalid_port);
COVERAGE_DEFINE(datapath_drop_invalid_tnl_port);
COVERAGE_DEFINE(datapath_drop_rx_invalid_packet);
+COVERAGE_DEFINE(datapath_skip_tunnel_push);

/* Protects against changes to 'dp_netdevs'. */
static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
@@ -538,6 +539,16 @@ struct dp_netdev_flow {
bool dead;
uint32_t mark;   /* Unique flow mark assigned to a flow */

+/* The next two members are used to support partial offloading of
+ * actions. The boolean flag tells if this flow has its actions partially
+ * offloaded. The egress port# tells if the action should be offloaded
+ * on the egress (output) port instead of the in-port for the flow. Note
+ * that we support flows with a single egress port action.
+ * (see MAX_ACTION_ATTRS for related comments).
+ */
+bool partial_actions_offloaded;
+odp_port_t  egress_offload_port;
+
/* Statistics. */
struct dp_netdev_flow_stats stats;

@@ -791,7 +802,8 @@ static void dp_netdev_execute_actions(struct 
dp_netdev_pmd_thread *pmd,
  bool should_steal,
  const struct flow *flow,
  const struct nlattr *actions,
-  size_t actions_len);
+  size_t actions_len,
+  const struct dp_netdev_flow *dp_flow);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
struct dp_packet_batch *, odp_port_t port_no);
static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
@@ -3828,7 +3840,7 @@ dpif_netdev_execute(struct dpif *dpif, struct 
dpif_execute *execute)
dp_packet_batch_init_packet(, execute->packet);
pp.do_not_steal = true;
dp_netdev_execute_actions(pmd, , false, execute->flow,
-  execute->actions, execute->actions_len);
+  execute->actions, execute->actions_len, NULL);
dp_netdev_pmd_flush_output_packets(pmd, true);

if (pmd->core_id == NON_PMD_CORE_ID) {
@@ -6456,7 +6468,7 @@ packet_batch_per_flow_execute(struct 
packet_batch_per_flow *batch,
actions = dp_netdev_flow_get_actions(flow);

dp_netdev_execute_actions(pmd, >array, true, >flow,
-  actions->actions, actions->size);
+  actions->actions, actions->size, flow);
}

static inline void
@@ -6764,7 +6776,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
 * we'll send the packet up twice. */
dp_packet_batch_init_packet(, packet);
dp_netdev_execute_actions(pmd, , true, ,
-  actions->data, actions->size);
+  actions->data, actions->size, NULL);

add_actions = put_actions->size ? put_actions : actions;
if (OVS_LIKELY(error != ENOSPC)) {
@@ -6999,6 +7011,7 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_execute_aux {
struct dp_netdev_pmd_thread *pmd;
const struct flow *flow;
+const struct dp_netdev_flow *dp_flow;/* for partial action offload */
};

static void
@@ -7143,7 +7156,7 @@ dp_execute_userspace_action(struct dp_netdev_pmd_thread 
*pmd,
if (!error || error == ENOSPC) {
dp_packet_batch_init_packet(, packet);
dp_netdev_execute_actions(pmd, , should_steal, flow,
-  actions->data, actions->size);
+  actions->data, actions->size, NULL);
} else if (should_steal) {
dp_packet_delete(packet);
COVERAGE_INC(datapath_drop_userspace_action_error);
@@ -7162,6 +7175,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch 
*packets_,
int type = nl_attr_type(a);
struct 

Re: [ovs-dev] [RFC v3 PATCH 3/5] dpif-netdev: Skip encap action during datapath execution

2020-06-01 Thread Sriharsha Basavapatna via dev
On Mon, Jun 1, 2020 at 9:18 PM Eli Britstein  wrote:
>
>
> On 6/1/2020 6:15 PM, Sriharsha Basavapatna wrote:
> > On Mon, Jun 1, 2020 at 7:58 PM Eli Britstein  wrote:
> >>
> >> On 5/28/2020 11:19 AM, Sriharsha Basavapatna wrote:
> >>> In this patch we check if action processing (apart from OUTPUT action)
> >>> should be skipped for a given dp_netdev_flow. Specifically, we check if
> >>> the action is TNL_PUSH and if it has been offloaded to HW, then we do not
> >>> push the tunnel header in SW. The datapath only executes the OUTPUT 
> >>> action.
> >>> The packet will be encapsulated in HW during transmit.
> >>>
> >>> Signed-off-by: Sriharsha Basavapatna 
> >>> ---
> >>>lib/dpif-netdev.c | 46 --
> >>>1 file changed, 36 insertions(+), 10 deletions(-)
> >>>
> >>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> >>> index 781b233f4..3e175c25e 100644
> >>> --- a/lib/dpif-netdev.c
> >>> +++ b/lib/dpif-netdev.c
> >>> @@ -112,6 +112,7 @@ COVERAGE_DEFINE(datapath_drop_recirc_error);
> >>>COVERAGE_DEFINE(datapath_drop_invalid_port);
> >>>COVERAGE_DEFINE(datapath_drop_invalid_tnl_port);
> >>>COVERAGE_DEFINE(datapath_drop_rx_invalid_packet);
> >>> +COVERAGE_DEFINE(datapath_skip_tunnel_push);
> >>>
> >>>/* Protects against changes to 'dp_netdevs'. */
> >>>static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
> >>> @@ -538,6 +539,16 @@ struct dp_netdev_flow {
> >>>bool dead;
> >>>uint32_t mark;   /* Unique flow mark assigned to a 
> >>> flow */
> >>>
> >>> +/* The next two members are used to support partial offloading of
> >>> + * actions. The boolean flag tells if this flow has its actions 
> >>> partially
> >>> + * offloaded. The egress port# tells if the action should be 
> >>> offloaded
> >>> + * on the egress (output) port instead of the in-port for the flow. 
> >>> Note
> >>> + * that we support flows with a single egress port action.
> >>> + * (see MAX_ACTION_ATTRS for related comments).
> >>> + */
> >>> +bool partial_actions_offloaded;
> >>> +odp_port_t  egress_offload_port;
> >>> +
> >>>/* Statistics. */
> >>>struct dp_netdev_flow_stats stats;
> >>>
> >>> @@ -791,7 +802,8 @@ static void dp_netdev_execute_actions(struct 
> >>> dp_netdev_pmd_thread *pmd,
> >>>  bool should_steal,
> >>>  const struct flow *flow,
> >>>  const struct nlattr *actions,
> >>> -  size_t actions_len);
> >>> +  size_t actions_len,
> >>> +  const struct dp_netdev_flow 
> >>> *dp_flow);
> >>>static void dp_netdev_input(struct dp_netdev_pmd_thread *,
> >>>struct dp_packet_batch *, odp_port_t 
> >>> port_no);
> >>>static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
> >>> @@ -3828,7 +3840,7 @@ dpif_netdev_execute(struct dpif *dpif, struct 
> >>> dpif_execute *execute)
> >>>dp_packet_batch_init_packet(, execute->packet);
> >>>pp.do_not_steal = true;
> >>>dp_netdev_execute_actions(pmd, , false, execute->flow,
> >>> -  execute->actions, execute->actions_len);
> >>> +  execute->actions, execute->actions_len, 
> >>> NULL);
> >>>dp_netdev_pmd_flush_output_packets(pmd, true);
> >>>
> >>>if (pmd->core_id == NON_PMD_CORE_ID) {
> >>> @@ -6456,7 +6468,7 @@ packet_batch_per_flow_execute(struct 
> >>> packet_batch_per_flow *batch,
> >>>actions = dp_netdev_flow_get_actions(flow);
> >>>
> >>>dp_netdev_execute_actions(pmd, >array, true, >flow,
> >>> -  actions->actions, actions->size);
> >>> +  actions->actions, actions->size, flow);
> >>>}
> >>>
> >>>static inline void
> >>> @@ -6764,7 +6776,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread 
> >>> *pmd,
> >>> * we'll send the packet up twice. */
> >>>dp_packet_batch_init_packet(, packet);
> >>>dp_netdev_execute_actions(pmd, , true, ,
> >>> -  actions->data, actions->size);
> >>> +  actions->data, actions->size, NULL);
> >>>
> >>>add_actions = put_actions->size ? put_actions : actions;
> >>>if (OVS_LIKELY(error != ENOSPC)) {
> >>> @@ -6999,6 +7011,7 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread 
> >>> *pmd,
> >>>struct dp_netdev_execute_aux {
> >>>struct dp_netdev_pmd_thread *pmd;
> >>>const struct flow *flow;
> >>> +const struct dp_netdev_flow *dp_flow;/* for partial action 
> >>> offload */
> >>>};
> >>>
> >>>static void
> >>> @@ -7143,7 +7156,7 @@ dp_execute_userspace_action(struct 
> >>> dp_netdev_pmd_thread *pmd,
> >>>if (!error || error == 

Re: [ovs-dev] [RFC v3 PATCH 3/5] dpif-netdev: Skip encap action during datapath execution

2020-06-01 Thread Eli Britstein



On 5/28/2020 11:19 AM, Sriharsha Basavapatna wrote:

In this patch we check if action processing (apart from OUTPUT action)
should be skipped for a given dp_netdev_flow. Specifically, we check if
the action is TNL_PUSH and if it has been offloaded to HW, then we do not
push the tunnel header in SW. The datapath only executes the OUTPUT action.
The packet will be encapsulated in HW during transmit.

Signed-off-by: Sriharsha Basavapatna 
---
  lib/dpif-netdev.c | 46 --
  1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 781b233f4..3e175c25e 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -112,6 +112,7 @@ COVERAGE_DEFINE(datapath_drop_recirc_error);
  COVERAGE_DEFINE(datapath_drop_invalid_port);
  COVERAGE_DEFINE(datapath_drop_invalid_tnl_port);
  COVERAGE_DEFINE(datapath_drop_rx_invalid_packet);
+COVERAGE_DEFINE(datapath_skip_tunnel_push);
  
  /* Protects against changes to 'dp_netdevs'. */

  static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
@@ -538,6 +539,16 @@ struct dp_netdev_flow {
  bool dead;
  uint32_t mark;   /* Unique flow mark assigned to a flow */
  
+/* The next two members are used to support partial offloading of

+ * actions. The boolean flag tells if this flow has its actions partially
+ * offloaded. The egress port# tells if the action should be offloaded
+ * on the egress (output) port instead of the in-port for the flow. Note
+ * that we support flows with a single egress port action.
+ * (see MAX_ACTION_ATTRS for related comments).
+ */
+bool partial_actions_offloaded;
+odp_port_t  egress_offload_port;
+
  /* Statistics. */
  struct dp_netdev_flow_stats stats;
  
@@ -791,7 +802,8 @@ static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,

bool should_steal,
const struct flow *flow,
const struct nlattr *actions,
-  size_t actions_len);
+  size_t actions_len,
+  const struct dp_netdev_flow *dp_flow);
  static void dp_netdev_input(struct dp_netdev_pmd_thread *,
  struct dp_packet_batch *, odp_port_t port_no);
  static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
@@ -3828,7 +3840,7 @@ dpif_netdev_execute(struct dpif *dpif, struct 
dpif_execute *execute)
  dp_packet_batch_init_packet(, execute->packet);
  pp.do_not_steal = true;
  dp_netdev_execute_actions(pmd, , false, execute->flow,
-  execute->actions, execute->actions_len);
+  execute->actions, execute->actions_len, NULL);
  dp_netdev_pmd_flush_output_packets(pmd, true);
  
  if (pmd->core_id == NON_PMD_CORE_ID) {

@@ -6456,7 +6468,7 @@ packet_batch_per_flow_execute(struct 
packet_batch_per_flow *batch,
  actions = dp_netdev_flow_get_actions(flow);
  
  dp_netdev_execute_actions(pmd, >array, true, >flow,

-  actions->actions, actions->size);
+  actions->actions, actions->size, flow);
  }
  
  static inline void

@@ -6764,7 +6776,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
   * we'll send the packet up twice. */
  dp_packet_batch_init_packet(, packet);
  dp_netdev_execute_actions(pmd, , true, ,
-  actions->data, actions->size);
+  actions->data, actions->size, NULL);
  
  add_actions = put_actions->size ? put_actions : actions;

  if (OVS_LIKELY(error != ENOSPC)) {
@@ -6999,6 +7011,7 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
  struct dp_netdev_execute_aux {
  struct dp_netdev_pmd_thread *pmd;
  const struct flow *flow;
+const struct dp_netdev_flow *dp_flow;/* for partial action offload */
  };
  
  static void

@@ -7143,7 +7156,7 @@ dp_execute_userspace_action(struct dp_netdev_pmd_thread 
*pmd,
  if (!error || error == ENOSPC) {
  dp_packet_batch_init_packet(, packet);
  dp_netdev_execute_actions(pmd, , should_steal, flow,
-  actions->data, actions->size);
+  actions->data, actions->size, NULL);
  } else if (should_steal) {
  dp_packet_delete(packet);
  COVERAGE_INC(datapath_drop_userspace_action_error);
@@ -7162,6 +7175,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch 
*packets_,
  int type = nl_attr_type(a);
  struct tx_port *p;
  uint32_t packet_count, packets_dropped;
+struct dp_netdev_flow *dp_flow = aux->dp_flow;
  
  switch ((enum ovs_action_attr)type) {

  case OVS_ACTION_ATTR_OUTPUT:
@@ -7219,9 +7233,20 @@ dp_execute_cb(void *aux_, struct dp_packet_batch 
*packets_,
  

Re: [ovs-dev] [RFC v3 PATCH 3/5] dpif-netdev: Skip encap action during datapath execution

2020-06-01 Thread Eli Britstein



On 6/1/2020 6:15 PM, Sriharsha Basavapatna wrote:

On Mon, Jun 1, 2020 at 7:58 PM Eli Britstein  wrote:


On 5/28/2020 11:19 AM, Sriharsha Basavapatna wrote:

In this patch we check if action processing (apart from OUTPUT action)
should be skipped for a given dp_netdev_flow. Specifically, we check if
the action is TNL_PUSH and if it has been offloaded to HW, then we do not
push the tunnel header in SW. The datapath only executes the OUTPUT action.
The packet will be encapsulated in HW during transmit.

Signed-off-by: Sriharsha Basavapatna 
---
   lib/dpif-netdev.c | 46 --
   1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 781b233f4..3e175c25e 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -112,6 +112,7 @@ COVERAGE_DEFINE(datapath_drop_recirc_error);
   COVERAGE_DEFINE(datapath_drop_invalid_port);
   COVERAGE_DEFINE(datapath_drop_invalid_tnl_port);
   COVERAGE_DEFINE(datapath_drop_rx_invalid_packet);
+COVERAGE_DEFINE(datapath_skip_tunnel_push);

   /* Protects against changes to 'dp_netdevs'. */
   static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
@@ -538,6 +539,16 @@ struct dp_netdev_flow {
   bool dead;
   uint32_t mark;   /* Unique flow mark assigned to a flow */

+/* The next two members are used to support partial offloading of
+ * actions. The boolean flag tells if this flow has its actions partially
+ * offloaded. The egress port# tells if the action should be offloaded
+ * on the egress (output) port instead of the in-port for the flow. Note
+ * that we support flows with a single egress port action.
+ * (see MAX_ACTION_ATTRS for related comments).
+ */
+bool partial_actions_offloaded;
+odp_port_t  egress_offload_port;
+
   /* Statistics. */
   struct dp_netdev_flow_stats stats;

@@ -791,7 +802,8 @@ static void dp_netdev_execute_actions(struct 
dp_netdev_pmd_thread *pmd,
 bool should_steal,
 const struct flow *flow,
 const struct nlattr *actions,
-  size_t actions_len);
+  size_t actions_len,
+  const struct dp_netdev_flow *dp_flow);
   static void dp_netdev_input(struct dp_netdev_pmd_thread *,
   struct dp_packet_batch *, odp_port_t port_no);
   static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
@@ -3828,7 +3840,7 @@ dpif_netdev_execute(struct dpif *dpif, struct 
dpif_execute *execute)
   dp_packet_batch_init_packet(, execute->packet);
   pp.do_not_steal = true;
   dp_netdev_execute_actions(pmd, , false, execute->flow,
-  execute->actions, execute->actions_len);
+  execute->actions, execute->actions_len, NULL);
   dp_netdev_pmd_flush_output_packets(pmd, true);

   if (pmd->core_id == NON_PMD_CORE_ID) {
@@ -6456,7 +6468,7 @@ packet_batch_per_flow_execute(struct 
packet_batch_per_flow *batch,
   actions = dp_netdev_flow_get_actions(flow);

   dp_netdev_execute_actions(pmd, >array, true, >flow,
-  actions->actions, actions->size);
+  actions->actions, actions->size, flow);
   }

   static inline void
@@ -6764,7 +6776,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
* we'll send the packet up twice. */
   dp_packet_batch_init_packet(, packet);
   dp_netdev_execute_actions(pmd, , true, ,
-  actions->data, actions->size);
+  actions->data, actions->size, NULL);

   add_actions = put_actions->size ? put_actions : actions;
   if (OVS_LIKELY(error != ENOSPC)) {
@@ -6999,6 +7011,7 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
   struct dp_netdev_execute_aux {
   struct dp_netdev_pmd_thread *pmd;
   const struct flow *flow;
+const struct dp_netdev_flow *dp_flow;/* for partial action offload */
   };

   static void
@@ -7143,7 +7156,7 @@ dp_execute_userspace_action(struct dp_netdev_pmd_thread 
*pmd,
   if (!error || error == ENOSPC) {
   dp_packet_batch_init_packet(, packet);
   dp_netdev_execute_actions(pmd, , should_steal, flow,
-  actions->data, actions->size);
+  actions->data, actions->size, NULL);
   } else if (should_steal) {
   dp_packet_delete(packet);
   COVERAGE_INC(datapath_drop_userspace_action_error);
@@ -7162,6 +7175,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch 
*packets_,
   int type = nl_attr_type(a);
   struct tx_port *p;
   uint32_t packet_count, packets_dropped;
+struct dp_netdev_flow *dp_flow = aux->dp_flow;

   switch ((enum 

Re: [ovs-dev] [RFC v3 PATCH 3/5] dpif-netdev: Skip encap action during datapath execution

2020-06-01 Thread Sriharsha Basavapatna via dev
On Mon, Jun 1, 2020 at 7:58 PM Eli Britstein  wrote:
>
>
> On 5/28/2020 11:19 AM, Sriharsha Basavapatna wrote:
> > In this patch we check if action processing (apart from OUTPUT action)
> > should be skipped for a given dp_netdev_flow. Specifically, we check if
> > the action is TNL_PUSH and if it has been offloaded to HW, then we do not
> > push the tunnel header in SW. The datapath only executes the OUTPUT action.
> > The packet will be encapsulated in HW during transmit.
> >
> > Signed-off-by: Sriharsha Basavapatna 
> > ---
> >   lib/dpif-netdev.c | 46 --
> >   1 file changed, 36 insertions(+), 10 deletions(-)
> >
> > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> > index 781b233f4..3e175c25e 100644
> > --- a/lib/dpif-netdev.c
> > +++ b/lib/dpif-netdev.c
> > @@ -112,6 +112,7 @@ COVERAGE_DEFINE(datapath_drop_recirc_error);
> >   COVERAGE_DEFINE(datapath_drop_invalid_port);
> >   COVERAGE_DEFINE(datapath_drop_invalid_tnl_port);
> >   COVERAGE_DEFINE(datapath_drop_rx_invalid_packet);
> > +COVERAGE_DEFINE(datapath_skip_tunnel_push);
> >
> >   /* Protects against changes to 'dp_netdevs'. */
> >   static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
> > @@ -538,6 +539,16 @@ struct dp_netdev_flow {
> >   bool dead;
> >   uint32_t mark;   /* Unique flow mark assigned to a flow */
> >
> > +/* The next two members are used to support partial offloading of
> > + * actions. The boolean flag tells if this flow has its actions 
> > partially
> > + * offloaded. The egress port# tells if the action should be offloaded
> > + * on the egress (output) port instead of the in-port for the flow. 
> > Note
> > + * that we support flows with a single egress port action.
> > + * (see MAX_ACTION_ATTRS for related comments).
> > + */
> > +bool partial_actions_offloaded;
> > +odp_port_t  egress_offload_port;
> > +
> >   /* Statistics. */
> >   struct dp_netdev_flow_stats stats;
> >
> > @@ -791,7 +802,8 @@ static void dp_netdev_execute_actions(struct 
> > dp_netdev_pmd_thread *pmd,
> > bool should_steal,
> > const struct flow *flow,
> > const struct nlattr *actions,
> > -  size_t actions_len);
> > +  size_t actions_len,
> > +  const struct dp_netdev_flow 
> > *dp_flow);
> >   static void dp_netdev_input(struct dp_netdev_pmd_thread *,
> >   struct dp_packet_batch *, odp_port_t port_no);
> >   static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
> > @@ -3828,7 +3840,7 @@ dpif_netdev_execute(struct dpif *dpif, struct 
> > dpif_execute *execute)
> >   dp_packet_batch_init_packet(, execute->packet);
> >   pp.do_not_steal = true;
> >   dp_netdev_execute_actions(pmd, , false, execute->flow,
> > -  execute->actions, execute->actions_len);
> > +  execute->actions, execute->actions_len, 
> > NULL);
> >   dp_netdev_pmd_flush_output_packets(pmd, true);
> >
> >   if (pmd->core_id == NON_PMD_CORE_ID) {
> > @@ -6456,7 +6468,7 @@ packet_batch_per_flow_execute(struct 
> > packet_batch_per_flow *batch,
> >   actions = dp_netdev_flow_get_actions(flow);
> >
> >   dp_netdev_execute_actions(pmd, >array, true, >flow,
> > -  actions->actions, actions->size);
> > +  actions->actions, actions->size, flow);
> >   }
> >
> >   static inline void
> > @@ -6764,7 +6776,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd,
> >* we'll send the packet up twice. */
> >   dp_packet_batch_init_packet(, packet);
> >   dp_netdev_execute_actions(pmd, , true, ,
> > -  actions->data, actions->size);
> > +  actions->data, actions->size, NULL);
> >
> >   add_actions = put_actions->size ? put_actions : actions;
> >   if (OVS_LIKELY(error != ENOSPC)) {
> > @@ -6999,6 +7011,7 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread 
> > *pmd,
> >   struct dp_netdev_execute_aux {
> >   struct dp_netdev_pmd_thread *pmd;
> >   const struct flow *flow;
> > +const struct dp_netdev_flow *dp_flow;/* for partial action offload 
> > */
> >   };
> >
> >   static void
> > @@ -7143,7 +7156,7 @@ dp_execute_userspace_action(struct 
> > dp_netdev_pmd_thread *pmd,
> >   if (!error || error == ENOSPC) {
> >   dp_packet_batch_init_packet(, packet);
> >   dp_netdev_execute_actions(pmd, , should_steal, flow,
> > -  actions->data, actions->size);
> > +  actions->data, actions->size, NULL);
> >   } else if (should_steal) {
> >   dp_packet_delete(packet);
> >