On 11/14, Yahui Cao wrote: >To check whether FDIR programming succeeded or failed, legacy programming >status descriptor WB format is enabled and FDIR queue irq is opened. > >Fixes: 84dc7a95a2d3 ("net/ice: enable flow director engine") >Cc: beilei.x...@intel.com > >Signed-off-by: Yahui Cao <yahui....@intel.com> >Signed-off-by: Beilei Xing <beilei.x...@intel.com> >--- > drivers/net/ice/ice_ethdev.c | 1 + > drivers/net/ice/ice_rxtx.c | 93 +++++++++++++++++++++++++++++++++--- > 2 files changed, 88 insertions(+), 6 deletions(-) > >diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c >index 3b20ea423..27b0fbc83 100644 >--- a/drivers/net/ice/ice_ethdev.c >+++ b/drivers/net/ice/ice_ethdev.c >@@ -2726,6 +2726,7 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) > > /* Enable FDIR MSIX interrupt */ > if (pf->fdir.fdir_vsi) { >+ pf->fdir.fdir_vsi->nb_used_qps = 1; > ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi); > ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi); > } >diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c >index 18c02979e..2db174456 100644 >--- a/drivers/net/ice/ice_rxtx.c >+++ b/drivers/net/ice/ice_rxtx.c >@@ -535,7 +535,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) > { > struct ice_vsi *vsi = rxq->vsi; > struct ice_hw *hw = ICE_VSI_TO_HW(vsi); >- uint32_t rxdid = ICE_RXDID_COMMS_GENERIC; >+ uint32_t rxdid = ICE_RXDID_LEGACY_1; > struct ice_rlan_ctx rx_ctx; > enum ice_status err; > uint32_t regval; >@@ -550,9 +550,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) > rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; > rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; > rx_ctx.dtype = 0; /* No Header Split mode */ >-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC > rx_ctx.dsize = 1; /* 32B descriptors */ >-#endif > rx_ctx.rxmax = RTE_ETHER_MAX_LEN; > /* TPH: Transaction Layer Packet (TLP) processing hints */ > rx_ctx.tphrdesc_ena = 1; >@@ -2077,7 +2075,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf) > } > > /* 
Allocate RX hardware ring descriptors. */ >- ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC; >+ ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC; > ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); > > rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", >@@ -2096,7 +2094,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf) > > rxq->rx_ring_dma = rz->iova; > memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * >- sizeof(union ice_rx_flex_desc)); >+ sizeof(union ice_32byte_rx_desc)); > rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr; > > /* >@@ -3607,12 +3605,81 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev) > ad->ptype_tbl[i] = ice_get_default_pkt_type(i); > } > >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1 >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \ >+ (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S) >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0 >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1 >+ >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4 >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \ >+ (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S) >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5 >+#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \ >+ (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S) >+ >+/* >+ * check the programming status descriptor in rx queue. 
>+ * done after Programming Flow Director is programmed on >+ * tx queue >+ */ >+static inline int >+ice_check_fdir_programming_status(struct ice_rx_queue *rxq) >+{ >+ volatile union ice_32byte_rx_desc *rxdp; >+ uint64_t qword1; >+ uint32_t rx_status; >+ uint32_t error; >+ uint32_t id; >+ int ret = -EAGAIN; >+ >+ rxdp = (volatile union ice_32byte_rx_desc *) >+ (&rxq->rx_ring[rxq->rx_tail]); >+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); >+ rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >+ >> ICE_RXD_QW1_STATUS_S; >+ >+ if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) { >+ ret = 0; >+ error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >> >+ ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S; >+ id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >> >+ ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S; >+ if (error) { >+ if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD) >+ PMD_DRV_LOG(ERR, "Failed to add FDIR rule."); >+ else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL) >+ PMD_DRV_LOG(ERR, "Failed to remove FDIR rule."); >+ ret = -EINVAL; >+ goto err; >+ } >+ error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >> >+ ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S; >+ if (error) { >+ PMD_DRV_LOG(ERR, "Failed to create FDIR profile."); >+ ret = -EINVAL; >+ } >+err: >+ rxdp->wb.qword1.status_error_len = 0; >+ rxq->rx_tail++; >+ if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) >+ rxq->rx_tail = 0; >+ if (rxq->rx_tail == 0) >+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); >+ else >+ ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1); >+ } >+ >+ return ret; >+} >+ > #define ICE_FDIR_MAX_WAIT_US 10000 > > int > ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc) > { > struct ice_tx_queue *txq = pf->fdir.txq; >+ struct ice_rx_queue *rxq = pf->fdir.rxq; > volatile struct ice_fltr_desc *fdirdp; > volatile struct ice_tx_desc *txdp; > uint32_t td_cmd; >@@ -3650,5 +3717,19 @@ ice_fdir_programming(struct ice_pf *pf, struct >ice_fltr_desc 
*fdir_desc) > return -ETIMEDOUT; > } > >- return 0; >+ for (; i < ICE_FDIR_MAX_WAIT_US; i++) { >+ int ret; >+ >+ ret = ice_check_fdir_programming_status(rxq); >+ if (ret == -EAGAIN) >+ rte_delay_us(1); >+ else >+ return ret; >+ } >+ >+ PMD_DRV_LOG(ERR, >+ "Failed to program FDIR filter: programming status >reported."); >+ return -ETIMEDOUT; >+ >+ > } >-- >2.17.1 >
Acked-by: Xiaolong Ye <xiaolong...@intel.com> Applied to dpdk-next-net-intel, Thanks.