Signed-off-by: Nelio Laranjeiro <nelio.laranje...@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 681 +++++++++++++++++++++++++----------
 1 file changed, 484 insertions(+), 197 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4ef0a3fee..27354615f 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -54,6 +54,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 /* Action fate on the packet. */
 #define MLX5_FLOW_FATE_DROP (1u << 0)
 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
+#define MLX5_FLOW_FATE_RSS (1u << 2)
 
 /* Modify a packet. */
 #define MLX5_FLOW_MOD_FLAG (1u << 0)
@@ -68,15 +69,40 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 #define MLX5_FLOW_CTRL 0xffffffff
 #define MLX5_FLOW_CTRL_PRIO_OFFSET (MLX5_FLOW_PRIO_L2 + 1)
 
+#define MLX5_RSS_EXP_SUPP(...) \
+       ((const enum rte_flow_item_type []) \
+        { __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END })
+
+/** Supported expansion of items. */
+static const enum rte_flow_item_type *mlx5_supported_expansion[] = {
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH),
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_IPV4),
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH,
+                         RTE_FLOW_ITEM_TYPE_IPV4,
+                         RTE_FLOW_ITEM_TYPE_UDP),
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH,
+                         RTE_FLOW_ITEM_TYPE_IPV4,
+                         RTE_FLOW_ITEM_TYPE_TCP),
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_IPV6),
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH,
+                         RTE_FLOW_ITEM_TYPE_IPV6,
+                         RTE_FLOW_ITEM_TYPE_UDP),
+       MLX5_RSS_EXP_SUPP(RTE_FLOW_ITEM_TYPE_ETH,
+                         RTE_FLOW_ITEM_TYPE_IPV6,
+                         RTE_FLOW_ITEM_TYPE_TCP),
+};
+
 /** Handles information leading to a drop fate. */
 struct mlx5_flow_verbs {
-       unsigned int size; /**< Size of the attribute. */
+       LIST_ENTRY(mlx5_flow_verbs) next;
+       /**< Pointer to the next Verbs flow structure. */
        uint32_t layers;
        /**< Bit-fields of present layers see MLX5_FLOW_ITEMS_*. */
        uint32_t modifier;
        /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
        uint32_t fate;
        /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
+       unsigned int size; /**< Size of the attribute. */
        struct {
                struct ibv_flow_attr *attr;
                /**< Pointer to the Specification buffer. */
@@ -84,14 +110,19 @@ struct mlx5_flow_verbs {
        };
        struct ibv_flow *flow; /**< Verbs flow pointer. */
        struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+       uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
 };
 
 /* Flow structure. */
 struct rte_flow {
        TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
        struct rte_flow_attr attributes; /**< User flow attribute. */
-       struct mlx5_flow_verbs verbs; /* Verbs flow. */
-       uint16_t queue; /**< Destination queue to redirect traffic to. */
+       LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
+       struct mlx5_flow_verbs *cur_verbs;
+       /**< Current Verbs flow structure being filled. */
+       struct rte_flow_action_rss rss; /**< RSS context. */
+       uint8_t key[40]; /**< RSS hash key. */
+       uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 };
 
 static const struct rte_flow_ops mlx5_flow_ops = {
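
Each MLX5_RSS_EXP_SUPP() entry above is a C99 compound literal: an anonymous
array terminated by RTE_FLOW_ITEM_TYPE_END, so walkers of
mlx5_supported_expansion[] need no per-entry length. As a sketch, the second
entry expands to:

    ((const enum rte_flow_item_type [])
     { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_IPV4,
       RTE_FLOW_ITEM_TYPE_END })
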
@@ -144,6 +175,8 @@ void
 mlx5_flow_print(struct rte_flow *flow __rte_unused)
 {
 #ifndef NDEBUG
+       struct mlx5_flow_verbs *verbs = LIST_FIRST(&flow->verbs);
+
        fprintf(stdout, "---------8<------------\n");
        fprintf(stdout, "%s: flow information\n", MLX5_DRIVER_NAME);
        fprintf(stdout, " attributes: group %u priority %u ingress %d egress %d"
@@ -152,27 +185,36 @@ mlx5_flow_print(struct rte_flow *flow __rte_unused)
                flow->attributes.ingress,
                flow->attributes.egress,
                flow->attributes.transfer);
-       fprintf(stdout, " layers: %s/%s/%s\n",
-               flow->verbs.layers & MLX5_FLOW_LAYER_L2 ? "l2" : "-",
-               flow->verbs.layers & MLX5_FLOW_LAYER_L3 ? "l3" : "-",
-               flow->verbs.layers & MLX5_FLOW_LAYER_L4 ? "l4" : "-");
-       if (flow->verbs.fate & MLX5_FLOW_FATE_DROP)
+       if (verbs->fate & MLX5_FLOW_FATE_DROP) {
                fprintf(stdout, " fate: drop queue\n");
-       else if (flow->verbs.fate & MLX5_FLOW_FATE_QUEUE)
-               fprintf(stdout, " fate: target queue %u\n", flow->queue);
-       if (flow->verbs.attr) {
-               struct ibv_spec_header *hdr =
-                       (struct ibv_spec_header *)flow->verbs.specs;
-               const int n = flow->verbs.attr->num_of_specs;
-               int i;
-
-               fprintf(stdout, " Verbs attributes: priority %u specs_n %u\n",
-                       flow->verbs.attr->priority,
-                       flow->verbs.attr->num_of_specs);
-               for (i = 0; i != n; ++i) {
-                       rte_hexdump(stdout, " ", hdr, hdr->size);
-                       hdr = (struct ibv_spec_header *)
-                               ((uint8_t *)hdr + hdr->size);
+       } else {
+               uint16_t i;
+
+               fprintf(stdout, " fate: target queues");
+               for (i = 0; i != flow->rss.queue_num; ++i)
+                       fprintf(stdout, " %u", (*flow->queue)[i]);
+               fprintf(stdout, "\n");
+       }
+       LIST_FOREACH(verbs, &flow->verbs, next) {
+               fprintf(stdout, " layers: %s/%s/%s\n",
+                       verbs->layers & MLX5_FLOW_LAYER_L2 ? "l2" : "-",
+                       verbs->layers & MLX5_FLOW_LAYER_L3 ? "l3" : "-",
+                       verbs->layers & MLX5_FLOW_LAYER_L4 ? "l4" : "-");
+               if (verbs->attr) {
+                       struct ibv_spec_header *hdr =
+                               (struct ibv_spec_header *)verbs->specs;
+                       const int n = verbs->attr->num_of_specs;
+                       int i;
+
+                       fprintf(stdout, " Verbs attributes: priority %u"
+                               " specs_n %u\n",
+                               verbs->attr->priority,
+                               verbs->attr->num_of_specs);
+                       for (i = 0; i != n; ++i) {
+                               rte_hexdump(stdout, " ", hdr, hdr->size);
+                               hdr = (struct ibv_spec_header *)
+                                       ((uint8_t *)hdr + hdr->size);
+                       }
                }
        }
        fprintf(stdout, "--------->8------------\n");
@@ -289,24 +331,25 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
 /**
  * Add a verbs specification.
  *
- * @param flow
- *   Pointer to flow structure.
+ * @param verbs
+ *   Pointer to mlx5_flow_verbs structure.
  * @param src
  *   Create specification.
  * @param size
  *   Size in bytes of the specification to copy.
  */
 static void
-mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+mlx5_flow_spec_verbs_add(struct mlx5_flow_verbs *verbs, void *src,
+                        unsigned int size)
 {
-       if (flow->verbs.specs) {
+       if (verbs->specs) {
                void *dst;
 
-               dst = (void *)(flow->verbs.specs + flow->verbs.size);
+               dst = (void *)(verbs->specs + verbs->size);
                memcpy(dst, src, size);
-               ++flow->verbs.attr->num_of_specs;
+               ++verbs->attr->num_of_specs;
        }
-       flow->verbs.size += size;
+       verbs->size += size;
 }
 
 /**
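
mlx5_flow_spec_verbs_add() keeps the two-pass convention already used by this
file: when verbs->specs is NULL the call only accumulates the space the
specification would need; only a real buffer gets the copy and the
num_of_specs increment. A minimal sketch of a dry-run call (local names are
illustrative):

    struct ibv_flow_spec_eth eth = {
            .type = IBV_FLOW_SPEC_ETH,
            .size = sizeof(eth),
    };
    struct mlx5_flow_verbs dry = { .specs = NULL, };

    mlx5_flow_spec_verbs_add(&dry, &eth, sizeof(eth));
    /* dry.size == sizeof(eth); nothing was copied. */
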
@@ -334,13 +377,14 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
                .type = RTE_BE16(0xffff),
        };
        const unsigned int size = sizeof(struct ibv_flow_spec_eth);
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
        struct ibv_flow_spec_eth eth = {
                .type = IBV_FLOW_SPEC_ETH,
                .size = size,
        };
        int ret;
 
-       if (flow->verbs.layers & MLX5_FLOW_LAYER_L2)
+       if (verbs->layers & MLX5_FLOW_LAYER_L2)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -369,8 +413,8 @@ mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
                }
                eth.val.ether_type &= eth.mask.ether_type;
        }
-       mlx5_flow_spec_verbs_add(flow, &eth, size);
-       flow->verbs.layers |= MLX5_FLOW_LAYER_L2;
+       mlx5_flow_spec_verbs_add(verbs, &eth, size);
+       verbs->layers |= MLX5_FLOW_LAYER_L2;
        return 0;
 }
 
@@ -426,6 +470,7 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
        };
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
        const unsigned int size = sizeof(struct ibv_flow_spec_eth);
        struct ibv_flow_spec_eth eth = {
                .type = IBV_FLOW_SPEC_ETH,
@@ -436,12 +481,12 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
        const uint32_t vlanm = MLX5_FLOW_LAYER_VLAN;
        const uint32_t l2m = MLX5_FLOW_LAYER_L2;
 
-       if (flow->verbs.layers & vlanm)
+       if (verbs->layers & vlanm)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "L2 layers already configured");
-       else if ((flow->verbs.layers & lm) != 0)
+       else if ((verbs->layers & lm) != 0)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -477,11 +522,11 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
                                          item->spec,
                                          "VLAN TPID matching is not"
                                          " supported");
-       if (!(flow->verbs.layers & l2m))
-               mlx5_flow_spec_verbs_add(flow, &eth, size);
+       if (!(verbs->layers & l2m))
+               mlx5_flow_spec_verbs_add(verbs, &eth, size);
        else
-               mlx5_flow_item_vlan_update(flow->verbs.attr, &eth);
-       flow->verbs.layers |= MLX5_FLOW_LAYER_L2 | MLX5_FLOW_LAYER_VLAN;
+               mlx5_flow_item_vlan_update(verbs->attr, &eth);
+       verbs->layers |= MLX5_FLOW_LAYER_L2 | MLX5_FLOW_LAYER_VLAN;
        return 0;
 }
 
@@ -512,6 +557,7 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                        .next_proto_id = 0xff,
                },
        };
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
        unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
        struct ibv_flow_spec_ipv4_ext ipv4 = {
                .type = IBV_FLOW_SPEC_IPV4_EXT,
@@ -519,12 +565,12 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
        };
        int ret;
 
-       if (flow->verbs.layers & MLX5_FLOW_LAYER_L3)
+       if (verbs->layers & MLX5_FLOW_LAYER_L3)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "multiple L3 layers not supported");
-       else if (flow->verbs.layers & MLX5_FLOW_LAYER_L4)
+       else if (verbs->layers & MLX5_FLOW_LAYER_L4)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -555,8 +601,8 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
                ipv4.val.proto &= ipv4.mask.proto;
                ipv4.val.tos &= ipv4.mask.tos;
        }
-       mlx5_flow_spec_verbs_add(flow, &ipv4, size);
-       flow->verbs.layers |= MLX5_FLOW_LAYER_L3_IPV4;
+       mlx5_flow_spec_verbs_add(verbs, &ipv4, size);
+       verbs->layers |= MLX5_FLOW_LAYER_L3_IPV4;
        return 0;
 }
 
@@ -592,6 +638,7 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                        .hop_limits = 0xff,
                },
        };
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
        unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
        struct ibv_flow_spec_ipv6 ipv6 = {
                .type = IBV_FLOW_SPEC_IPV6,
@@ -599,12 +646,12 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
        };
        int ret;
 
-       if (flow->verbs.layers & MLX5_FLOW_LAYER_L3)
+       if (verbs->layers & MLX5_FLOW_LAYER_L3)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "multiple L3 layers not supported");
-       else if (flow->verbs.layers & MLX5_FLOW_LAYER_L4)
+       else if (verbs->layers & MLX5_FLOW_LAYER_L4)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -655,8 +702,8 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
                ipv6.val.next_hdr &= ipv6.mask.next_hdr;
                ipv6.val.hop_limit &= ipv6.mask.hop_limit;
        }
-       mlx5_flow_spec_verbs_add(flow, &ipv6, size);
-       flow->verbs.layers |= MLX5_FLOW_LAYER_L3_IPV6;
+       mlx5_flow_spec_verbs_add(verbs, &ipv6, size);
+       verbs->layers |= MLX5_FLOW_LAYER_L3_IPV6;
        return 0;
 }
 
@@ -679,6 +726,7 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
        unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
        struct ibv_flow_spec_tcp_udp udp = {
                .type = IBV_FLOW_SPEC_UDP,
@@ -686,7 +734,7 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
        };
        int ret;
 
-       if (flow->verbs.layers & MLX5_FLOW_LAYER_L4)
+       if (verbs->layers & MLX5_FLOW_LAYER_L4)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -707,8 +755,8 @@ mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
                udp.val.src_port &= udp.mask.src_port;
                udp.val.dst_port &= udp.mask.dst_port;
        }
-       mlx5_flow_spec_verbs_add(flow, &udp, size);
-       flow->verbs.layers |= MLX5_FLOW_LAYER_L4_UDP;
+       mlx5_flow_spec_verbs_add(verbs, &udp, size);
+       verbs->layers |= MLX5_FLOW_LAYER_L4_UDP;
        return 0;
 }
 
@@ -731,6 +779,7 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
 {
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
        unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
        struct ibv_flow_spec_tcp_udp tcp = {
                .type = IBV_FLOW_SPEC_TCP,
@@ -738,7 +787,7 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
        };
        int ret;
 
-       if (flow->verbs.layers & MLX5_FLOW_LAYER_L4)
+       if (verbs->layers & MLX5_FLOW_LAYER_L4)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -759,8 +808,8 @@ mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
                tcp.val.src_port &= tcp.mask.src_port;
                tcp.val.dst_port &= tcp.mask.dst_port;
        }
-       mlx5_flow_spec_verbs_add(flow, &tcp, size);
-       flow->verbs.layers |= MLX5_FLOW_LAYER_L4_TCP;
+       mlx5_flow_spec_verbs_add(verbs, &tcp, size);
+       verbs->layers |= MLX5_FLOW_LAYER_L4_TCP;
        return 0;
 }
 
@@ -782,6 +831,8 @@ mlx5_flow_items(const struct rte_flow_item items[],
                struct rte_flow *flow __rte_unused,
                struct rte_flow_error *error)
 {
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int ret = 0;
 
@@ -815,7 +866,7 @@ mlx5_flow_items(const struct rte_flow_item items[],
                if (ret < 0)
                        return ret;
        }
-       if (!flow->verbs.layers) {
+       if (!verbs->layers) {
                const struct rte_flow_item item = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                };
@@ -845,15 +896,16 @@ mlx5_flow_action_drop(const struct rte_flow_action *actions,
                        .type = IBV_FLOW_SPEC_ACTION_DROP,
                        .size = size,
        };
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
 
-       if (flow->verbs.fate)
+       if (verbs->fate)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          actions,
                                          "multiple fate actions are not"
                                          " supported");
-       mlx5_flow_spec_verbs_add(flow, &drop, size);
-       flow->verbs.fate |= MLX5_FLOW_FATE_DROP;
+       mlx5_flow_spec_verbs_add(verbs, &drop, size);
+       verbs->fate |= MLX5_FLOW_FATE_DROP;
        return 0;
 }
 
@@ -877,8 +929,9 @@ mlx5_flow_action_queue(struct rte_eth_dev *dev,
 {
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_action_queue *queue = actions->conf;
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
 
-       if (flow->verbs.fate)
+       if (verbs->fate)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          actions,
@@ -894,8 +947,144 @@ mlx5_flow_action_queue(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue is not configured");
-       flow->queue = queue->index;
-       flow->verbs.fate |= MLX5_FLOW_FATE_QUEUE;
+       if (flow->queue)
+               (*flow->queue)[0] = queue->index;
+       flow->rss.queue_num = 1;
+       verbs->fate |= MLX5_FLOW_FATE_QUEUE;
+       return 0;
+}
+
+/**
+ * Store the Verbs hash fields according to the flow layers and RSS types.
+ *
+ * @param verbs
+ *   Pointer to a struct mlx5_flow_verbs.
+ * @param types
+ *   RSS types for this flow (see ETH_RSS_*).
+ */
+static void
+mlx5_flow_action_rss_hash_field(struct mlx5_flow_verbs *verbs, uint32_t types)
+{
+       if ((types & ETH_RSS_NONFRAG_IPV4_TCP) &&
+           (verbs->layers & MLX5_FLOW_LAYER_L4_TCP))
+               verbs->hash_fields = IBV_RX_HASH_SRC_IPV4 |
+                       IBV_RX_HASH_DST_IPV4 |
+                       IBV_RX_HASH_SRC_PORT_TCP |
+                       IBV_RX_HASH_DST_PORT_TCP;
+       else if ((types & ETH_RSS_NONFRAG_IPV4_UDP) &&
+                (verbs->layers & MLX5_FLOW_LAYER_L4_UDP))
+               verbs->hash_fields = IBV_RX_HASH_SRC_IPV4 |
+                       IBV_RX_HASH_DST_IPV4 |
+                       IBV_RX_HASH_SRC_PORT_UDP |
+                       IBV_RX_HASH_DST_PORT_UDP;
+       else if ((types & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) &&
+                (verbs->layers & MLX5_FLOW_LAYER_L3_IPV4))
+               verbs->hash_fields = IBV_RX_HASH_SRC_IPV4 |
+                       IBV_RX_HASH_DST_IPV4;
+       else if ((types & ETH_RSS_NONFRAG_IPV6_TCP) &&
+                (verbs->layers & MLX5_FLOW_LAYER_L4_TCP))
+               verbs->hash_fields = IBV_RX_HASH_SRC_IPV6 |
+                       IBV_RX_HASH_DST_IPV6 |
+                       IBV_RX_HASH_SRC_PORT_TCP |
+                       IBV_RX_HASH_DST_PORT_TCP;
+       else if ((types & ETH_RSS_NONFRAG_IPV6_UDP) &&
+                (verbs->layers & MLX5_FLOW_LAYER_L4_UDP))
+               verbs->hash_fields = IBV_RX_HASH_SRC_IPV6 |
+                       IBV_RX_HASH_DST_IPV6 |
+                       IBV_RX_HASH_SRC_PORT_UDP |
+                       IBV_RX_HASH_DST_PORT_UDP;
+       else if ((types & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) &&
+                (verbs->layers & MLX5_FLOW_LAYER_L3_IPV6))
+               verbs->hash_fields = IBV_RX_HASH_SRC_IPV6 |
+                       IBV_RX_HASH_DST_IPV6;
+       else
+               verbs->hash_fields = 0;
+}
+
+/**
+ * Validate the RSS action provided by the user.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param actions
+ *   Pointer to flow actions array.
+ * @param flow
+ *   Pointer to the rte_flow structure.
+ * @param error
+ *   Pointer to error structure.
+ */
+static int
+mlx5_flow_action_rss(struct rte_eth_dev *dev,
+                    const struct rte_flow_action *actions,
+                    struct rte_flow *flow,
+                    struct rte_flow_error *error)
+{
+       struct priv *priv = dev->data->dev_private;
+       const struct rte_flow_action_rss *rss = actions->conf;
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+       unsigned int i;
+
+       /*
+        * If the verbs list is not empty, the RSS action has already been
+        * validated while parsing another expanded pattern.
+        */
+       if (!LIST_EMPTY(&flow->verbs)) {
+               verbs->fate |= MLX5_FLOW_FATE_RSS;
+               return 0;
+       }
+       if (verbs->fate)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         actions,
+                                         "multiple fate actions are not"
+                                         " supported");
+       if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+           rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->func,
+                                         "RSS hash function not supported");
+       if (rss->level > 1)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->level,
+                                         "tunnel RSS is not supported");
+       if (rss->key_len < rss_hash_default_key_len)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->key_len,
+                                         "RSS hash key too small");
+       if (rss->key_len > rss_hash_default_key_len)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->key_len,
+                                         "RSS hash key too large");
+       if (rss->queue_num > priv->config.ind_table_max_size)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->queue_num,
+                                         "number of queues too large");
+       if (rss->types & MLX5_RSS_HF_MASK)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                         &rss->types,
+                                         "some RSS protocols are not"
+                                         " supported");
+       for (i = 0; i != rss->queue_num; ++i) {
+               if (!(*priv->rxqs)[rss->queue[i]])
+                       return rte_flow_error_set
+                               (error, EINVAL,
+                                RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+                                &rss->queue[i],
+                                "queue is not configured");
+       }
+       if (flow->queue)
+               memcpy((*flow->queue), rss->queue,
+                      rss->queue_num * sizeof(uint16_t));
+       flow->rss.queue_num = rss->queue_num;
+       memcpy(flow->key, rss->key, rss_hash_default_key_len);
+       flow->rss.types = rss->types;
+       verbs->fate |= MLX5_FLOW_FATE_RSS;
        return 0;
 }
 
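For reference, a sketch of an rte_flow_action_rss configuration that passes
the validation above; all values are illustrative, not taken from this patch:

    static uint8_t key[40]; /* application-chosen Toeplitz key */
    static const uint16_t queues[] = { 0, 1, 2, 3 };
    struct rte_flow_action_rss conf = {
            .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
            .level = 0, /* levels > 1 (tunnel RSS) are rejected */
            .types = ETH_RSS_IP | ETH_RSS_UDP,
            .key_len = sizeof(key), /* must equal rss_hash_default_key_len */
            .key = key,
            .queue_num = RTE_DIM(queues),
            .queue = queues,
    };
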
@@ -919,31 +1108,32 @@ mlx5_flow_action_flag(struct rte_flow *flow)
                .size = size,
                .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
        };
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
 
-       if (flow->verbs.modifier & MLX5_FLOW_MOD_MARK)
+       if (verbs->modifier & MLX5_FLOW_MOD_MARK)
                return 0;
-       mlx5_flow_spec_verbs_add(flow, &tag, size);
-       flow->verbs.modifier |= MLX5_FLOW_MOD_FLAG;
+       mlx5_flow_spec_verbs_add(verbs, &tag, size);
+       verbs->modifier |= MLX5_FLOW_MOD_FLAG;
        return 0;
 }
 
 /**
  * Update verbs specification to modify the flag to mark.
  *
- * @param flow
- *   Pointer to the rte_flow structure.
+ * @param verbs
+ *   Pointer to the mlx5_flow_verbs structure.
  * @param mark_id
  *   Mark identifier to replace the flag.
  */
 static void
-mlx5_flow_action_mark_fate_queue(struct rte_flow *flow, uint32_t mark_id)
+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
 {
        int i;
 
        /* Update Verbs specification. */
-       for (i = 0; i != flow->verbs.attr->num_of_specs; ++i) {
+       for (i = 0; i != verbs->attr->num_of_specs; ++i) {
                struct ibv_spec_header *hdr =
-                       (struct ibv_spec_header *)flow->verbs.attr;
+                       (struct ibv_spec_header *)verbs->attr;
 
                if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
                        struct ibv_flow_spec_action_tag *t =
@@ -978,6 +1168,7 @@ mlx5_flow_action_mark(const struct rte_flow_action *actions,
                .type = IBV_FLOW_SPEC_ACTION_TAG,
                .size = size,
        };
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
 
        if (!mark)
                return rte_flow_error_set(error, EINVAL,
@@ -990,13 +1181,13 @@ mlx5_flow_action_mark(const struct rte_flow_action *actions,
                                          &mark->id,
                                          "mark must be between 0 and"
                                          " 16777199");
-       if (flow->verbs.modifier & MLX5_FLOW_MOD_FLAG) {
-               mlx5_flow_action_mark_fate_queue(flow, mark->id);
+       if (verbs->modifier & MLX5_FLOW_MOD_FLAG) {
+               mlx5_flow_verbs_mark_update(verbs, mark->id);
        } else {
                tag.tag_id = mlx5_flow_mark_set(mark->id);
-               mlx5_flow_spec_verbs_add(flow, &tag, size);
+               mlx5_flow_spec_verbs_add(verbs, &tag, size);
        }
-       flow->verbs.modifier |= MLX5_FLOW_MOD_MARK;
+       verbs->modifier |= MLX5_FLOW_MOD_MARK;
        return 0;
 }
 
@@ -1019,13 +1210,14 @@ mlx5_flow_actions(struct rte_eth_dev *dev,
                  struct rte_flow_error *error)
 {
        int ret;
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
 
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
-                       if (flow->verbs.modifier & MLX5_FLOW_MOD_MARK)
+                       if (verbs->modifier & MLX5_FLOW_MOD_MARK)
                                return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1042,6 +1234,9 @@ mlx5_flow_actions(struct rte_eth_dev *dev,
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_action_queue(dev, actions, flow, error);
                        break;
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       ret = mlx5_flow_action_rss(dev, actions, flow, error);
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1051,7 +1246,7 @@ mlx5_flow_actions(struct rte_eth_dev *dev,
                if (ret < 0)
                        return ret;
        }
-       if (!flow->verbs.fate)
+       if (!verbs->fate)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
@@ -1059,6 +1254,30 @@ mlx5_flow_actions(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * Set the Verbs flow priority according to the pattern it matches.
+ *
+ * @param layers
+ *   Bit-fields of present layers, see MLX5_FLOW_ITEMS_*.
+ * @param[in, out] attr
+ *   Pointer to Verbs attribute to update.
+ * @param[in] control
+ *   Nonzero when the specification is used for a default PMD flow.
+ */
+static void
+mlx5_flow_verbs_priority(uint32_t layers, struct ibv_flow_attr *attr,
+                        const uint32_t control)
+{
+       if (layers & MLX5_FLOW_LAYER_L4)
+               attr->priority = MLX5_FLOW_PRIO_L4;
+       else if (layers & MLX5_FLOW_LAYER_L3)
+               attr->priority = MLX5_FLOW_PRIO_L3;
+       else
+               attr->priority = MLX5_FLOW_PRIO_L2;
+       if (control)
+               attr->priority += MLX5_FLOW_CTRL_PRIO_OFFSET;
+}
+
 /**
  * Validate the rule and return a flow structure filled accordingly.
  *
@@ -1089,31 +1308,89 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
 {
-       struct rte_flow vflow = { .verbs.layers = 0, };
-       size_t size;
+       struct mlx5_flow_verbs vverbs = { .layers = 0, };
+       struct rte_flow vflow = { .cur_verbs = &vverbs, };
+       struct rte_flow_expand_rss *buf;
        int ret;
+       size_t size;
+       uint32_t i;
 
        /* Make a first virtual parse. */
        ret = mlx5_flow_actions(dev, actions, &vflow, error);
        if (ret)
                return ret;
-       ret = mlx5_flow_items(items, &vflow, error);
+       ret = rte_flow_expand_rss(NULL, 0, items, vflow.rss.types,
+                                 mlx5_supported_expansion);
+       assert(ret > 0);
+       buf = rte_calloc(__func__, 1, ret, 0);
+       if (!buf) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL,
+                                  "not enough memory to expand the RSS flow");
+               goto error;
+       }
+       ret = rte_flow_expand_rss(buf, ret, items, vflow.rss.types,
+                                 mlx5_supported_expansion);
+       assert(ret > 0);
+       /* Create the flow. */
+       size = sizeof(vflow) + vflow.rss.queue_num * sizeof(uint16_t);
+       size = RTE_ALIGN_CEIL(size, sizeof(void *));
+       if (size >= flow_size)
+               flow = &vflow;
+       else
+               flow->queue = (void *)(flow + 1);
+       LIST_INIT(&flow->verbs);
+       ret = mlx5_flow_attributes(attr, flow, error);
        if (ret)
                return ret;
-       /* Size of the verbs specification is now known. */
-       size = sizeof(vflow) + sizeof(struct ibv_flow_attr) + vflow.verbs.size;
-       if (size <= flow_size) {
-               ret = mlx5_flow_attributes(attr, flow, error);
-               if (ret)
-                       return ret;
-               ret = mlx5_flow_items(items, flow, error);
+       for (i = 0; i != buf->entries; ++i) {
+               size_t off = size;
+
+               memset(&vverbs, 0, sizeof(vverbs));
+               flow->cur_verbs = &vverbs;
+               ret = mlx5_flow_items
+                       ((const struct rte_flow_item *)buf->patterns[i],
+                        flow, error);
                if (ret)
-                       return ret;
+                       goto error;
                ret = mlx5_flow_actions(dev, actions, flow, error);
                if (ret)
-                       return ret;
+                       goto error;
+               /* Size of the verbs specification is now known. */
+               size += sizeof(struct ibv_flow_attr) + sizeof(vverbs) +
+                       vverbs.size;
+               if (size <= flow_size) {
+                       flow->cur_verbs = (void *)((uintptr_t)flow + off);
+                       flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
+                       flow->cur_verbs->specs =
+                               (void *)(flow->cur_verbs->attr + 1);
+                       ret = mlx5_flow_items
+                               ((const struct rte_flow_item *)buf->patterns[i],
+                                flow, error);
+                       if (ret)
+                               goto error;
+                       ret = mlx5_flow_actions(dev, actions, flow, error);
+                       if (ret)
+                               goto error;
+                       /*
+                        * Note: This verbs priority adjustment could be done
+                        * using the priority reported by the expansion.
+                        */
+                       mlx5_flow_verbs_priority(flow->cur_verbs->layers,
+                                                flow->cur_verbs->attr,
+                                                flow->attributes.priority ==
+                                                MLX5_FLOW_CTRL);
+                       mlx5_flow_action_rss_hash_field(flow->cur_verbs,
+                                                       flow->rss.types);
+                       LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
+               }
        }
+       rte_free(buf);
        return size;
+error:
+       rte_free(buf);
+       return ret;
 }
 
 /**
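
When the caller's buffer is large enough, mlx5_flow_merge() lays everything
out in that single allocation; roughly (assuming the offsets computed above):

    +---------------------+  <- flow
    | struct rte_flow     |
    +---------------------+  <- flow->queue = (void *)(flow + 1)
    | uint16_t queue[num] |  (padded to pointer alignment)
    +---------------------+  <- flow->cur_verbs, one block per pattern
    | mlx5_flow_verbs     |
    | ibv_flow_attr       |
    | specs ...           |
    +---------------------+  (block repeated buf->entries times)
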
@@ -1131,25 +1408,43 @@ mlx5_flow_rxq_mark(struct rte_eth_dev *dev, struct rte_flow *flow, int enable)
 {
        struct priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data;
+       uint32_t i;
        const uint32_t mask = MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK;
-       struct rte_flow *tmp;
-       uint32_t mark = !!enable;
+       struct mlx5_flow_verbs *verbs;
 
-       if (!(flow->verbs.modifier & mask))
+       verbs = LIST_FIRST(&flow->verbs);
+       if (!(verbs->modifier & mask))
                return;
-       rxq_data = (*priv->rxqs)[flow->queue];
-       /**
-        * Mark/Flag bit can only be disabled when there is no other
-        * flow applied using the same queue has a MARK/FLOW action
-        * configured.
-        */
-       TAILQ_FOREACH(tmp, &priv->flows, next) {
-               if (tmp == flow)
-                       continue;
-               if (tmp->queue == flow->queue)
-                       mark |= !!(tmp->verbs.modifier & mask);
+       for (i = 0; i != flow->rss.queue_num; ++i) {
+               int mark = 0;
+               unsigned int qidx = (*flow->queue)[i];
+               struct rte_flow *tmp;
+
+               rxq_data = (*priv->rxqs)[qidx];
+
+               if (enable) {
+                       rxq_data->mark = 1;
+                       continue;
+               }
+               /*
+                * The Mark/Flag bit can only be disabled when no other flow
+                * using the same queue has a MARK/FLAG action configured.
+                */
+               TAILQ_FOREACH(tmp, &priv->flows, next) {
+                       uint32_t j;
+                       struct mlx5_flow_verbs *vtmp;
+
+                       if (tmp == flow)
+                               continue;
+                       vtmp = LIST_FIRST(&tmp->verbs);
+                       for (j = 0; j != tmp->rss.queue_num; ++j)
+                               if ((*tmp->queue)[j] == qidx) {
+                                       mark |= !!(vtmp->modifier & mask);
+                                       break;
+                               }
+               }
+               rxq_data->mark = mark;
        }
-       rxq_data->mark = mark;
 }
 
 /**
@@ -1183,43 +1478,21 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 static void
 mlx5_flow_fate_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-       if (flow->verbs.fate & MLX5_FLOW_FATE_DROP) {
-               if (flow->verbs.flow) {
-                       claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
-                       flow->verbs.flow = NULL;
+       struct mlx5_flow_verbs *verbs;
+
+       LIST_FOREACH(verbs, &flow->verbs, next) {
+               if (verbs->flow) {
+                       claim_zero(mlx5_glue->destroy_flow(verbs->flow));
+                       verbs->flow = NULL;
+               }
+               if (verbs->hrxq) {
+                       if (verbs->fate & MLX5_FLOW_FATE_DROP)
+                               mlx5_hrxq_drop_release(dev, verbs->hrxq);
+                       else
+                               mlx5_hrxq_release(dev, verbs->hrxq);
+                       verbs->hrxq = NULL;
                }
        }
-       if (flow->verbs.hrxq) {
-               if (flow->verbs.fate & MLX5_FLOW_FATE_DROP)
-                       mlx5_hrxq_drop_release(dev, flow->verbs.hrxq);
-               else if (flow->verbs.fate & MLX5_FLOW_FATE_QUEUE)
-                       mlx5_hrxq_release(dev, flow->verbs.hrxq);
-               flow->verbs.hrxq = NULL;
-       }
-}
-
-/**
- * Update the verbs specification according to the pattern it matches.
- *
- * @param layers
- *   Bit-fields of present layers see MLX5_FLOW_ITEMS_*.
- * @param attr[in, out]
- *   Pointer to Verbs attribute to update.
- * @param control[in]
- *   The specification is used for default PMD flows.
- */
-static void
-mlx5_flow_verbs_priority(uint32_t layers, struct ibv_flow_attr *attr,
-                        const uint32_t control)
-{
-       if (layers & MLX5_FLOW_LAYER_L4)
-               attr->priority = MLX5_FLOW_PRIO_L4;
-       else if (layers & MLX5_FLOW_LAYER_L3)
-               attr->priority = MLX5_FLOW_PRIO_L3;
-       else
-               attr->priority = MLX5_FLOW_PRIO_L2;
-       if (control)
-               attr->priority += MLX5_FLOW_CTRL_PRIO_OFFSET;
 }
 
 /**
@@ -1239,53 +1512,66 @@ static int
 mlx5_flow_fate_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
               struct rte_flow_error *error)
 {
-       if (flow->verbs.fate & MLX5_FLOW_FATE_DROP) {
-               flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
-               if (!flow->verbs.hrxq)
-                       return rte_flow_error_set
-                               (error, errno,
-                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                NULL,
-                                "cannot allocate Drop queue");
-       } else if (flow->verbs.fate & MLX5_FLOW_FATE_QUEUE) {
-               struct mlx5_hrxq *hrxq;
-
-               hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
-                                    rss_hash_default_key_len, 0,
-                                    &flow->queue, 1, 0, 0);
-               if (!hrxq)
-                       hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
-                                            rss_hash_default_key_len, 0,
-                                            &flow->queue, 1, 0, 0);
-               if (!hrxq)
-                       return rte_flow_error_set(error, rte_errno,
-                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                       NULL,
-                                       "cannot create flow");
-               flow->verbs.hrxq = hrxq;
-       } else {
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL,
-                                         "no fate found");
+       struct mlx5_flow_verbs *verbs;
+       int err;
+
+       LIST_FOREACH(verbs, &flow->verbs, next) {
+               if (verbs->fate & MLX5_FLOW_FATE_DROP) {
+                       verbs->hrxq = mlx5_hrxq_drop_new(dev);
+                       if (!verbs->hrxq) {
+                               rte_flow_error_set
+                                       (error, errno,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                        NULL,
+                                        "cannot get drop hash queue");
+                               goto error;
+                       }
+               } else {
+                       struct mlx5_hrxq *hrxq;
+
+                       hrxq = mlx5_hrxq_get(dev, flow->key,
+                                            rss_hash_default_key_len,
+                                            verbs->hash_fields,
+                                            (*flow->queue),
+                                            flow->rss.queue_num, 0, 0);
+                       if (!hrxq)
+                               hrxq = mlx5_hrxq_new(dev, flow->key,
+                                                    rss_hash_default_key_len,
+                                                    verbs->hash_fields,
+                                                    (*flow->queue),
+                                                    flow->rss.queue_num, 0, 0);
+                       if (!hrxq) {
+                               rte_flow_error_set
+                                       (error, rte_errno,
+                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                        NULL,
+                                        "cannot get hash queue");
+                               goto error;
+                       }
+                       verbs->hrxq = hrxq;
+               }
+               verbs->flow =
+                       mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
+               if (!verbs->flow) {
+                       rte_flow_error_set(error, errno,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL,
+                                          "hardware refuses to create flow");
+                       goto error;
+               }
        }
-       mlx5_flow_verbs_priority(flow->verbs.layers, flow->verbs.attr,
-                                flow->attributes.priority == MLX5_FLOW_CTRL);
-       flow->verbs.flow =
-               mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
-       if (!flow->verbs.flow) {
-               if (flow->verbs.fate & MLX5_FLOW_FATE_DROP)
-                       mlx5_hrxq_drop_release(dev, flow->verbs.hrxq);
+       return 0;
+error:
+       err = rte_errno; /* Save rte_errno before cleanup. */
+       LIST_FOREACH(verbs, &flow->verbs, next) {
+               if (verbs->fate & MLX5_FLOW_FATE_DROP)
+                       mlx5_hrxq_drop_release(dev, verbs->hrxq);
                else
-                       mlx5_hrxq_release(dev, flow->verbs.hrxq);
-               flow->verbs.hrxq = NULL;
-               return rte_flow_error_set(error, errno,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL,
-                                         "kernel module refuses to create"
-                                         " flow");
+                       mlx5_hrxq_release(dev, verbs->hrxq);
+               verbs->hrxq = NULL;
        }
-       return 0;
+       rte_errno = err; /* Restore rte_errno. */
+       return -rte_errno;
 }
 
 /**
@@ -1315,42 +1601,43 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error)
 {
-       struct rte_flow *flow;
-       size_t size;
+       struct rte_flow *flow = NULL;
+       size_t size = 0;
        int ret;
 
-       ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+       ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
        if (ret < 0)
                return NULL;
        size = ret;
-       flow = rte_zmalloc(__func__, size, 0);
+       flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
-                                  "cannot allocate memory");
+                                  "not enough memory to create flow");
                return NULL;
        }
-       flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
-       flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
        ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
-       if (ret < 0)
-               goto error;
+       if (ret < 0) {
+               rte_free(flow);
+               return NULL;
+       }
        assert((size_t)ret == size);
        if (!dev->data->dev_started)
                return flow;
        ret = mlx5_flow_fate_apply(dev, flow, error);
-       if (ret < 0)
-               goto error;
+       if (ret < 0) {
+               ret = rte_errno; /* Save rte_errno before cleanup. */
+               if (flow) {
+                       mlx5_flow_fate_remove(dev, flow);
+                       rte_free(flow);
+               }
+               rte_errno = ret; /* Restore rte_errno. */
+               return NULL;
+       }
        mlx5_flow_rxq_mark(dev, flow, 1);
        TAILQ_INSERT_TAIL(list, flow, next);
        return flow;
-error:
-       ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_fate_remove(dev, flow);
-       rte_free(flow);
-       rte_errno = ret; /* Restore rte_errno. */
-       return NULL;
 }
 
 /**
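
mlx5_flow_list_create() above drives mlx5_flow_merge() with the same
measure-then-fill convention this patch uses for rte_flow_expand_rss(): a
first call with no buffer returns the required size, the caller allocates
exactly that much, and a second call fills it. The generic shape of the
pattern, with hypothetical names:

    int size = build(NULL, 0, args);         /* first call: size only */
    void *obj = rte_calloc(__func__, 1, size, 0);
    if (!obj)
            return NULL;                     /* report ENOMEM */
    size = build(obj, size, args);           /* second call: fill obj */
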
-- 
2.17.0
