This patch adds the ability to route packets to a specific RX queue according
to their source/destination IP addresses and ports, L4 protocol, and VMDq pool.
---
 lib/librte_ether/rte_ethdev.c           |  81 ++++++++++++++++++++++
 lib/librte_ether/rte_ethdev.h           |  96 ++++++++++++++++++++++++++
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h |   8 +++
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c     | 115 ++++++++++++++++++++++++++++++++
 4 files changed, 300 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 4597176..6cf838b 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1195,6 +1195,87 @@ rte_eth_dev_get_vlan_offload(uint8_t port_id)
 }

 int
+rte_eth_dev_5tuple_add_filter(uint8_t port_id, uint8_t filter_id, struct 
rte_eth_5tuple_filter *filter)
+{
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info info;
+
+       rte_eth_dev_info_get(port_id, &info);
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (filter == NULL) {
+               PMD_DEBUG_TRACE("Invalid filter pointer\n");
+               return (-EINVAL);
+       }
+
+       if(filter->rx_queue >= info.max_rx_queues) {
+               PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", filter->rx_queue);
+               return (-EINVAL);
+       }
+
+       if ((filter->proto == RTE_5TUPLE_PROTO_OTHER) && (filter->mask & 
(ETH_5TUPLE_MASK_SRCPORT|ETH_5TUPLE_MASK_DSTPORT))) {
+               PMD_DEBUG_TRACE(" L4 protocol not TCP, UDP or SCTP, ports are 
meaningless /n");
+               return(-EINVAL);
+       }
+
+       if (filter->mask & ETH_5TUPLE_MASK_POOL) {
+               if(dev->data->dev_conf.rxmode.mq_mode < ETH_MQ_RX_VMDQ_ONLY) {
+                       PMD_DEBUG_TRACE("Port %d is in non-VT mode\n", port_id);
+                       return (-EINVAL);
+               }
+               if(filter->pool >= 
dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools) {
+                       PMD_DEBUG_TRACE("Invalid pool number %d\n", 
filter->pool);
+                       return (-EINVAL);
+               }
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->add_5tuple_filter, -ENOTSUP);
+       return (*dev->dev_ops->add_5tuple_filter)(dev, filter_id, filter);
+}
+
+int
+rte_eth_dev_5tuple_get_filter(uint8_t port_id, uint8_t filter_id, struct 
rte_eth_5tuple_filter *filter)
+{
+        struct rte_eth_dev *dev;
+
+        if (port_id >= nb_ports) {
+                PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+                return (-ENODEV);
+        }
+
+        dev = &rte_eth_devices[port_id];
+
+        if (filter == NULL) {
+                PMD_DEBUG_TRACE("Invalid filter pointer\n");
+                return (-EINVAL);
+        }
+
+        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_5tuple_filter, -ENOTSUP);
+        return (*dev->dev_ops->get_5tuple_filter)(dev, filter_id, filter);
+}
+
+int
+rte_eth_dev_5tuple_del_filter(uint8_t port_id, uint8_t filter_id)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->del_5tuple_filter, -ENOTSUP);
+       return (*dev->dev_ops->del_5tuple_filter)(dev, filter_id);
+}
+int
 rte_eth_dev_synq_add_filter(uint8_t port_id, struct rte_eth_synq_filter 
*filter)
 {
        struct rte_eth_dev *dev;
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 6b90aed..7f460c8 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -363,6 +363,14 @@ struct rte_eth_rss_conf {
 /* Definitions used for unicast hash  */
 #define ETH_VMDQ_NUM_UC_HASH_ARRAY  128 /**< Maximum nb. of UC hash array. */

+/* Definitions used for 5 tuple filters  */
+#define ETH_5TUPLE_MASK_SRCIP          0x1
+#define ETH_5TUPLE_MASK_DSTIP          0x2
+#define ETH_5TUPLE_MASK_SRCPORT                0x4
+#define ETH_5TUPLE_MASK_DSTPORT                0x8
+#define ETH_5TUPLE_MASK_PROTO          0x10
+#define ETH_5TUPLE_MASK_POOL           0x20
+
 /* Definitions used for L2 Ether type filters  */
 #define ETH_L2ETYPE_UP_EN              0x1
 #define ETH_L2ETYPE_POOL_EN            0x2
@@ -562,6 +570,31 @@ struct rte_eth_pfc_conf {
 };

/**
 *  Possible l4type of 5 tuple filters.
 *
 *  The numeric values are written directly into the hardware protocol
 *  field by the ixgbe implementation (masked with
 *  IXGBE_FTQF_PROTOCOL_MASK), so they must not be reordered.
 */
enum rte_5tuple_proto {
	RTE_5TUPLE_PROTO_TCP = 0,	/**< TCP. */
	RTE_5TUPLE_PROTO_UDP,		/**< UDP. */
	RTE_5TUPLE_PROTO_SCTP,		/**< SCTP. */
	RTE_5TUPLE_PROTO_OTHER,		/**< Other. */
};
+
/**
 * A structure used to configure Five Tuple Filters
 *
 * Only fields whose corresponding ETH_5TUPLE_MASK_* bit is set in
 * 'mask' participate in the match; matching packets are routed to
 * 'rx_queue'.
 */
struct rte_eth_5tuple_filter {
	uint32_t		src;		/**< Source IP address to match (byte order not specified here -- TODO confirm against PMD usage). */
	uint32_t		dst;		/**< Destination IP address to match. */
	uint16_t		src_port;	/**< Source L4 port to match. */
	uint16_t		dst_port;	/**< Destination L4 port to match. */
	enum rte_5tuple_proto	proto;		/**< L4 protocol to match. */
	uint8_t			priority;	/**< Filter priority; ixgbe accepts 1..7 only. */
	uint8_t			pool;		/**< VMDq pool, used with ETH_5TUPLE_MASK_POOL. */
	uint8_t			mask;		/**< Bitwise OR of ETH_5TUPLE_MASK_* flags selecting the fields to compare. */
	uint8_t			rx_queue;	/**< RX queue index matching packets are routed to. */
};
+
+/**
  * A structure used to configure SYN Packet queue Filters
  */
 struct rte_eth_synq_filter {
@@ -934,6 +967,15 @@ typedef uint16_t (*eth_tx_burst_t)(void *txq,
                                   uint16_t nb_pkts);
 /**< @internal Send output packets on a transmit queue of an Ethernet device. 
*/

+typedef int (*add_5tuple_filter_t)(struct rte_eth_dev *dev, uint8_t filter_id, 
struct rte_eth_5tuple_filter *filter);
+/**< @internal Setup 5 tuple queue filter */
+
+typedef int (*get_5tuple_filter_t)(struct rte_eth_dev *dev, uint8_t filter_id, 
struct rte_eth_5tuple_filter *filter);
+/**< @internal Get 5 tuple queue filter */
+
+typedef int (*del_5tuple_filter_t)(struct rte_eth_dev *dev, uint8_t filter_id);
+/**< @internal Delete 5 tuple queue filter */
+
 typedef int (*synq_add_filter_t)(struct rte_eth_dev *dev, struct 
rte_eth_synq_filter *filter);
 /**< @internal Setup SYN Packer queue filter */

@@ -1161,6 +1203,12 @@ struct eth_dev_ops {
        eth_set_vf_tx_t            set_vf_tx;  /**< enable/disable a VF 
transmit */
        eth_set_vf_vlan_filter_t   set_vf_vlan_filter;  /**< Set VF VLAN filter 
*/

+       /** Setup a 5 tuple filter. */
+       add_5tuple_filter_t add_5tuple_filter;
+       /** Get a 5 tuple filter. */
+       get_5tuple_filter_t get_5tuple_filter;
+       /** Delete a 5 tuple filter. */
+       del_5tuple_filter_t del_5tuple_filter;
        /** Setup a SYN Packet queue filter. */
        synq_add_filter_t synq_add_filter;
        /** Get a SYN Packet queue filter. */
@@ -2162,6 +2210,54 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 #endif

 /**
+ * Setup 5 tuple filter
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param filter_id
+ *   The index of filter rule. Must be in [0..127] range
+ * @param filter
+ *   The pointer to the rte_eth_5tuple_filter structure.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support 5 tuple filters.
+ *   - (-ENODEV)  if *port_id* invalid.
+ *   - (-ENOENT)  if *filter_id* invalid
+ *   - (-EINVAL)  if *filter* invalid.
+*/
+int rte_eth_dev_5tuple_add_filter(uint8_t port_id, uint8_t filter_id, struct 
rte_eth_5tuple_filter *filter);
+
+/**
+ *  Get 5 tuple filter
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param filter_id
+ *   The index of filter rule. Must be in [0..127] range
+ * @param filter
+ *   The pointer to the rte_eth_5tuple_filter structure.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support 5 tuple filters.
+ *   - (-ENODEV)  if *port_id* invalid.
+ *   - (-ENOENT)  if *filter_id* rule inactive
+ *   - (-EINVAL)  if *filter* pointer is NULL
+*/
+int rte_eth_dev_5tuple_get_filter(uint8_t port_id, uint8_t filter_id, struct 
rte_eth_5tuple_filter *filter);
+
+/**
+ * Delete 5 tuple filter
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param filter_id
+ *   The index of filter rule. Must be in [0..127] range
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support 5 tuple filters.
+ *   - (-ENODEV)  if *port_id* invalid.
+ *   - (-ENOENT)  if *filter_id* invalid
+*/
+int rte_eth_dev_5tuple_del_filter(uint8_t port_id, uint8_t filter_id);
+
+/**
  * Setup SYN Packet queue filter
  * @param port_id
  *   The port identifier of the Ethernet device.
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h 
b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
index 8f911ff..b4b3644 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -1509,6 +1509,14 @@ enum {
 #define IXGBE_FTQF_PROTOCOL_COMP_MASK  0x0F
 #define IXGBE_FTQF_POOL_MASK_EN                0x40000000
 #define IXGBE_FTQF_QUEUE_ENABLE                0x80000000
+#define IXGBE_FTQF_MAX_PRIORITY                7
+
+#define IXGBE_SDQPF_PORT_MASK          0x0000ffff
+#define IXGBE_SDQPF_DSTPORT_SHIFT      16
+
+#define IXGBE_L34TIMIR_RESERVE         0x00080000
+#define IXGBE_L34TIMIR_QUEUE_MASK      0x7f
+#define IXGBE_L34TIMIR_QUEUE_SHIFT     21

 /* Interrupt clear mask */
 #define IXGBE_IRQ_CLEAR_MASK   0xFFFFFFFF
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c 
b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 8ed637b..3f8a5b1 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -146,6 +146,9 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct 
ether_addr *mac_addr,
                uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config 
*dcb_config);
+static int ixgbe_5tuple_add_filter(struct rte_eth_dev *dev, uint8_t filter_id, 
struct rte_eth_5tuple_filter *filter);
+static int ixgbe_5tuple_get_filter(struct rte_eth_dev *dev, uint8_t filter_id, 
struct rte_eth_5tuple_filter *filter);
+static int ixgbe_5tuple_del_filter(struct rte_eth_dev *dev, uint8_t filter_id);
 static int ixgbe_synq_add_filter(struct rte_eth_dev *dev, struct 
rte_eth_synq_filter *filter);
 static int ixgbe_synq_get_filter(struct rte_eth_dev *dev, struct 
rte_eth_synq_filter *filter);
 static int ixgbe_synq_del_filter(struct rte_eth_dev *dev);
@@ -285,6 +288,9 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .set_vf_rx            = ixgbe_set_pool_rx,
        .set_vf_tx            = ixgbe_set_pool_tx,
        .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
+       .add_5tuple_filter      = ixgbe_5tuple_add_filter,
+       .get_5tuple_filter      = ixgbe_5tuple_get_filter,
+       .del_5tuple_filter      = ixgbe_5tuple_del_filter,
        .synq_add_filter          = ixgbe_synq_add_filter,
        .synq_get_filter          = ixgbe_synq_get_filter,
        .synq_del_filter          = ixgbe_synq_del_filter,
@@ -3074,6 +3080,115 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, 
uint8_t rule_id)
 }

 static int
+ixgbe_5tuple_add_filter(struct rte_eth_dev *dev, uint8_t filter_id, struct 
rte_eth_5tuple_filter *filter)
+{
+       struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t sdpqf, ftqf, l34timir = 0;
+       uint8_t mask = 0xff;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return (-ENOSYS);
+
+       if (filter_id >= IXGBE_MAX_FTQF_FILTERS)
+               return (-ENOENT);
+
+       if (!(filter->priority && (filter->priority <= 
IXGBE_FTQF_MAX_PRIORITY)))
+               return (-EINVAL);
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(filter_id));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+               return(-ENOENT);
+       ftqf = 0;
+
+       sdpqf = ((filter->dst_port << IXGBE_SDQPF_DSTPORT_SHIFT) | 
filter->src_port);
+       ftqf = (filter->proto & IXGBE_FTQF_PROTOCOL_MASK);
+       ftqf |= ((filter->priority & IXGBE_FTQF_PRIORITY_MASK) << 
IXGBE_FTQF_PRIORITY_SHIFT);
+       if (filter->mask & ETH_5TUPLE_MASK_SRCIP)
+               mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+       if (filter->mask & ETH_5TUPLE_MASK_DSTIP)
+               mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+       if (filter->mask & ETH_5TUPLE_MASK_SRCPORT)
+               mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+       if (filter->mask & ETH_5TUPLE_MASK_DSTPORT)
+               mask &= IXGBE_FTQF_DEST_PORT_MASK;
+       if (filter->mask & ETH_5TUPLE_MASK_PROTO)
+               mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+       ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+
+       if (filter->mask & ETH_5TUPLE_MASK_POOL) {
+               if (ixgbe_vmdq_mode_check(hw) < 0)
+                       return (-ENOTSUP);
+               ftqf &= ~IXGBE_FTQF_POOL_MASK_EN;
+               ftqf |= ((filter->pool & IXGBE_FTQF_POOL_MASK) << 
IXGBE_FTQF_POOL_SHIFT);
+       }
+       ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+       l34timir = IXGBE_L34TIMIR_RESERVE;
+       l34timir |= ((filter->rx_queue & IXGBE_L34TIMIR_QUEUE_MASK) << 
IXGBE_L34TIMIR_QUEUE_SHIFT);
+
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(filter_id), filter->src);
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(filter_id), filter->dst);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(filter_id), sdpqf);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(filter_id), ftqf);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(filter_id), l34timir);
+       return 0;
+}
+
+static int
+ixgbe_5tuple_get_filter(struct rte_eth_dev *dev, uint8_t filter_id, struct 
rte_eth_5tuple_filter *filter)
+{
+       struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t sdpqf, ftqf, l34timir;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return (-ENOSYS);
+
+       if (filter_id >= IXGBE_MAX_FTQF_FILTERS)
+               return (-ENOENT);
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(filter_id));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+               filter->proto = (enum rte_5tuple_proto)(ftqf & 
IXGBE_FTQF_PROTOCOL_MASK);
+               filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) & 
IXGBE_FTQF_PRIORITY_MASK;
+               filter->mask = ~(ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) & 
IXGBE_FTQF_5TUPLE_MASK_MASK;
+               if (!(ftqf & IXGBE_FTQF_POOL_MASK_EN)) {
+                       filter->pool = (ftqf >> IXGBE_FTQF_POOL_SHIFT) & 
IXGBE_FTQF_POOL_MASK;
+                       filter->mask |= ETH_5TUPLE_MASK_POOL;
+               }
+               filter->src = IXGBE_READ_REG(hw, IXGBE_SAQF(filter_id));
+               filter->dst = IXGBE_READ_REG(hw, IXGBE_DAQF(filter_id));
+               sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(filter_id));
+               filter->src_port = sdpqf & IXGBE_SDQPF_PORT_MASK;
+               filter->dst_port = (sdpqf >> IXGBE_SDQPF_DSTPORT_SHIFT) & 
IXGBE_SDQPF_PORT_MASK;
+               l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(filter_id));
+               filter->rx_queue = (l34timir >> IXGBE_L34TIMIR_QUEUE_SHIFT) & 
IXGBE_L34TIMIR_QUEUE_MASK;
+
+               return 0;
+       }
+       return (-ENOENT);
+}
+
/*
 * Remove the 5-tuple filter in slot 'filter_id' by zeroing every
 * register of the slot.  Writing 0 to FTQF clears the queue-enable
 * bit, which deactivates the filter.
 */
static int
ixgbe_5tuple_del_filter(struct rte_eth_dev *dev, uint8_t filter_id)
{
	struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* L3/L4 5-tuple filters are only implemented for 82599. */
	if (hw->mac.type != ixgbe_mac_82599EB)
		return (-ENOSYS);

	if (filter_id >= IXGBE_MAX_FTQF_FILTERS)
		return (-ENOENT);

	IXGBE_WRITE_REG(hw, IXGBE_SAQF(filter_id), 0);
	IXGBE_WRITE_REG(hw, IXGBE_DAQF(filter_id), 0);
	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(filter_id), 0);
	IXGBE_WRITE_REG(hw, IXGBE_FTQF(filter_id), 0);
	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(filter_id), 0);

	return 0;
}
+
+static int
 ixgbe_synq_add_filter(struct rte_eth_dev *dev, struct rte_eth_synq_filter 
*filter)
 {
        struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-- 
1.8.3.2

Reply via email to