Currently, HW offload accelerates only the rule matching sequence.
Introduce a framework for offloading rule actions as a pre-step toward
processing the rule actions in HW. In case of a failure, fall back to
the legacy partial offload scheme.

Note: a flow will be fully offloaded only if it can process all its
actions in HW.

Signed-off-by: Eli Britstein <el...@mellanox.com>
Reviewed-by: Oz Shlomo <o...@mellanox.com>
---
 lib/netdev-offload-dpdk.c | 112 +++++++++++++++++++++++++++++++++++++++-------
 lib/netdev-offload.h      |   1 +
 2 files changed, 97 insertions(+), 16 deletions(-)

diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index c272da340..b2ec05cec 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -28,6 +28,7 @@
 #include "uuid.h"
 
 VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
+static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(100, 5);
 
 /* Thread-safety
  * =============
@@ -424,6 +425,93 @@ add_flow_mark_rss_actions(struct flow_actions *actions,
     add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
 }
 
+static struct rte_flow *
+netdev_offload_dpdk_mark_rss(struct flow_patterns *patterns,
+                             struct netdev *netdev,
+                             uint32_t flow_mark)
+{
+    struct flow_actions actions = { .actions = NULL, .cnt = 0 };
+    const struct rte_flow_attr flow_attr = {
+        .group = 0,
+        .priority = 0,
+        .ingress = 1,
+        .egress = 0
+    };
+    struct rte_flow_error error;
+    struct rte_flow *flow;
+
+    add_flow_mark_rss_actions(&actions, flow_mark, netdev);
+
+    flow = netdev_dpdk_rte_flow_create(netdev, &flow_attr,
+                                       patterns->items,
+                                       actions.actions, &error);
+
+    if (!flow) {
+        VLOG_ERR("%s: Failed to create flow: %s (%u)\n",
+                 netdev_get_name(netdev), error.message, error.type);
+    }
+
+    free_flow_actions(&actions);
+    return flow;
+}
+
+static int
+parse_flow_actions(struct netdev *netdev OVS_UNUSED,
+                   struct flow_actions *actions,
+                   struct nlattr *nl_actions,
+                   size_t nl_actions_len,
+                   struct offload_info *info OVS_UNUSED)
+{
+    struct nlattr *nla;
+    size_t left;
+
+    NL_ATTR_FOR_EACH_UNSAFE (nla, left, nl_actions, nl_actions_len) {
+        VLOG_DBG_RL(&error_rl,
+                    "Unsupported action type %d", nl_attr_type(nla));
+        return -1;
+    }
+
+    if (nl_actions_len == 0) {
+        VLOG_DBG_RL(&error_rl,
+                    "Unsupported action type drop");
+        return -1;
+    }
+
+    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_END, NULL);
+    return 0;
+}
+
+static struct rte_flow *
+netdev_offload_dpdk_actions(struct netdev *netdev,
+                            struct flow_patterns *patterns,
+                            struct nlattr *nl_actions,
+                            size_t actions_len,
+                            struct offload_info *info)
+{
+    const struct rte_flow_attr flow_attr = { .ingress = 1, .transfer = 1 };
+    struct flow_actions actions = { .actions = NULL, .cnt = 0 };
+    struct rte_flow *flow = NULL;
+    struct rte_flow_error error;
+    int ret;
+
+    ret = parse_flow_actions(netdev, &actions, nl_actions, actions_len, info);
+    if (ret) {
+        goto out;
+    }
+    flow = netdev_dpdk_rte_flow_create(netdev, &flow_attr, patterns->items,
+                                       actions.actions, &error);
+    if (!flow) {
+        VLOG_ERR("%s: Failed to create flow: %s (%u)\n",
+                 netdev_get_name(netdev), error.message, error.type);
+    }
+    if (flow && info->actions_offloaded) {
+        *info->actions_offloaded = true;
+    }
+out:
+    free_flow_actions(&actions);
+    return flow;
+}
+
 static int
 netdev_offload_dpdk_add_flow(struct netdev *netdev,
                              const struct match *match,
@@ -432,16 +520,8 @@ netdev_offload_dpdk_add_flow(struct netdev *netdev,
                              const ovs_u128 *ufid,
                              struct offload_info *info)
 {
-    const struct rte_flow_attr flow_attr = {
-        .group = 0,
-        .priority = 0,
-        .ingress = 1,
-        .egress = 0
-    };
     struct flow_patterns patterns = { .items = NULL, .cnt = 0 };
-    struct flow_actions actions = { .actions = NULL, .cnt = 0 };
     struct rte_flow *flow;
-    struct rte_flow_error error;
     int ret = 0;
 
     ret = parse_flow_match(&patterns, match);
@@ -450,15 +530,16 @@ netdev_offload_dpdk_add_flow(struct netdev *netdev,
         goto out;
     }
 
-    add_flow_mark_rss_actions(&actions, info->flow_mark, netdev);
-
-    flow = netdev_dpdk_rte_flow_create(netdev, &flow_attr,
-                                       patterns.items,
-                                       actions.actions, &error);
+    flow = netdev_offload_dpdk_actions(netdev, &patterns, nl_actions,
+                                       actions_len, info);
+    if (!flow) {
+        /* If we failed to offload the rule actions, fall back to mark/RSS
+         * actions.
+         */
+        flow = netdev_offload_dpdk_mark_rss(&patterns, netdev, info->flow_mark);
+    }
     if (!flow) {
-        VLOG_ERR("%s: Failed to create flow: %s (%u)\n",
-                 netdev_get_name(netdev), error.message, error.type);
         ret = -1;
         goto out;
     }
@@ -468,7 +549,6 @@ netdev_offload_dpdk_add_flow(struct netdev *netdev,
 
 out:
     free_flow_patterns(&patterns);
-    free_flow_actions(&actions);
     return ret;
 }
 
diff --git a/lib/netdev-offload.h b/lib/netdev-offload.h
index 97a500647..892dca576 100644
--- a/lib/netdev-offload.h
+++ b/lib/netdev-offload.h
@@ -71,6 +71,7 @@ struct offload_info {
      * it will be in the pkt meta data.
      */
     uint32_t flow_mark;
+    bool *actions_offloaded; /* True if flow actions are fully offloaded. */
 };
 
 int netdev_flow_flush(struct netdev *);
-- 
2.14.5
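
In this patch the provider only reports back through the new
'actions_offloaded' pointer when it is non-NULL; a caller-side consumer
is not shown here. Below is a minimal caller-side sketch of the intended
use, assuming the netdev_flow_put() prototype from lib/netdev-offload.h;
put_flow_example() and its 'full_offload' out-parameter are hypothetical
names used only for illustration, not part of this series:

    #include <stdbool.h>
    #include "netdev-offload.h"

    /* Hypothetical caller-side sketch, not part of this patch. */
    static int
    put_flow_example(struct netdev *netdev, struct match *match,
                     struct nlattr *actions, size_t actions_len,
                     const ovs_u128 *ufid, uint32_t flow_mark,
                     bool *full_offload)
    {
        bool actions_offloaded = false;
        struct offload_info info = {
            .flow_mark = flow_mark,
            /* Ask the provider to report whether the actions were
             * offloaded along with the match. */
            .actions_offloaded = &actions_offloaded,
        };
        int err;

        err = netdev_flow_put(netdev, match, actions, actions_len, ufid,
                              &info, NULL);
        /* True only if the rule was created with its actions in HW; on the
         * mark/RSS fallback only matching is offloaded and the actions are
         * still executed in SW. */
        *full_offload = !err && actions_offloaded;
        return err;
    }

Keeping 'actions_offloaded' as a pointer lets a caller that does not
care pass NULL and be ignored by the provider, while a caller that does
care can distinguish full actions offload from the partial fallback.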