Signed-off-by: Nelio Laranjeiro <nelio.laranje...@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 105 +++++++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)
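
Not part of the patch, for reviewers: a minimal sketch of a flow rule that
exercises the new IPv6 item handler. It assumes the port is already started
and Rx queue 0 is configured; the helper name, port id and queue index are
illustrative only.

    #include <netinet/in.h>
    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Match TCP-over-IPv6 on the given port and steer it to Rx queue 0. */
    static int
    ipv6_tcp_to_queue0(uint16_t port_id)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_ipv6 spec = { .hdr = { .proto = IPPROTO_TCP } };
            struct rte_flow_item_ipv6 mask = { .hdr = { .proto = 0xff } };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV6,
                      .spec = &spec, .mask = &mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 0 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            if (!rte_flow_create(port_id, &attr, pattern, actions, &err))
                    return -rte_errno;
            return 0;
    }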

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 003c4aadd..6f3e50452 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -554,6 +554,108 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
        return 0;
 }
 
+/**
+ * Validate IPv6 layer and possibly create the Verbs specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
+                   struct rte_flow_error *error)
+{
+       const struct rte_flow_item_ipv6 *spec = item->spec;
+       const struct rte_flow_item_ipv6 *mask = item->mask;
+       const struct rte_flow_item_ipv6 nic_mask = {
+               .hdr = {
+                       .src_addr =
+                               "\xff\xff\xff\xff\xff\xff\xff\xff"
+                               "\xff\xff\xff\xff\xff\xff\xff\xff",
+                       .dst_addr =
+                               "\xff\xff\xff\xff\xff\xff\xff\xff"
+                               "\xff\xff\xff\xff\xff\xff\xff\xff",
+                       .vtc_flow = RTE_BE32(0xffffffff),
+                       .proto = 0xff,
+                       .hop_limits = 0xff,
+               },
+       };
+       unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
+       struct ibv_flow_spec_ipv6 ipv6 = {
+               .type = IBV_FLOW_SPEC_IPV6,
+               .size = size,
+       };
+       int ret;
+
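+       /* Only one L3 layer is allowed and it cannot follow an L4 layer. */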
+       if (flow->verbs.layers & MLX5_FLOW_LAYER_L3)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "multiple L3 layers not supported");
+       else if (flow->verbs.layers & MLX5_FLOW_LAYER_L4)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "L3 cannot follow an L4 layer");
+       if (!mask)
+               mask = &rte_flow_item_ipv6_mask;
+       ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
+                                     (const uint8_t *)&nic_mask,
+                                     sizeof(struct rte_flow_item_ipv6), error);
+       if (ret < 0)
+               return ret;
+       if (spec) {
+               unsigned int i;
+               uint32_t vtc_flow_val;
+               uint32_t vtc_flow_mask;
+
+               memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+                      RTE_DIM(ipv6.val.src_ip));
+               memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+                      RTE_DIM(ipv6.val.dst_ip));
+               memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+                      RTE_DIM(ipv6.mask.src_ip));
+               memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+                      RTE_DIM(ipv6.mask.dst_ip));
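+               /* Split vtc_flow into traffic class and flow label. */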
+               vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+               vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+               ipv6.val.flow_label =
+                       rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+                                        IPV6_HDR_FL_SHIFT);
+               ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+                                        IPV6_HDR_TC_SHIFT;
+               ipv6.val.next_hdr = spec->hdr.proto;
+               ipv6.val.hop_limit = spec->hdr.hop_limits;
+               ipv6.mask.flow_label =
+                       rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+                                        IPV6_HDR_FL_SHIFT);
+               ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+                                         IPV6_HDR_TC_SHIFT;
+               ipv6.mask.next_hdr = mask->hdr.proto;
+               ipv6.mask.hop_limit = mask->hdr.hop_limits;
+               /* Remove unwanted bits from values. */
+               for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+                       ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+                       ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+               }
+               ipv6.val.flow_label &= ipv6.mask.flow_label;
+               ipv6.val.traffic_class &= ipv6.mask.traffic_class;
+               ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+               ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+       }
+       mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+       flow->verbs.layers |= MLX5_FLOW_LAYER_L3_IPV6;
+       return 0;
+}
+
 /**
  * Validate items provided by the user.
  *
@@ -587,6 +687,9 @@ mlx5_flow_items(const struct rte_flow_item items[],
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_item_ipv4(items, flow, error);
                        break;
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       ret = mlx5_flow_item_ipv6(items, flow, error);
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
-- 
2.17.0
