[dpdk-dev] [PATCH] ixgbe: fix data access on big endian cpu

2015-03-24 Thread Xuelin Shi
Hi Thomas,

Done. http://patchwork.dpdk.org/dev/patchwork/patch/4123/

Thanks,
Xuelin Shi

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon at 6wind.com]
> Sent: Monday, March 23, 2015 22:02
> To: Shi Xuelin-B29237
> Cc: dev at dpdk.org; konstantin.ananyev at intel.com; helin.zhang at intel.com
> Subject: Re: [PATCH] ixgbe: fix data access on big endian cpu
> 
> 2015-03-03 16:27, xuelin.shi at freescale.com:
> > From: Xuelin Shi 
> >
> > enforce byte-order rules for data exchanged between the cpu and ixgbe:
> > 1. data the cpu reads from ixgbe must go through rte_le_to_cpu_xx(...)
> > 2. data the cpu writes for ixgbe must go through rte_cpu_to_le_xx(...)
> >
> > Signed-off-by: Xuelin Shi 
> 
> Please Xuelin, could you rebase on HEAD and fix these checkpatch errors?
> 
> ERROR:SPACING: space prohibited after that '!' (ctx:BxW)
> 
> ERROR:CODE_INDENT: code indent should use tabs where possible
> +^I^I ^I   ^I  IXGBE_RXDADV_STAT_DD)) {$
> 
> Thanks


[dpdk-dev] [PATCH] ixgbe: fix data access on big endian cpu

2015-03-23 Thread Thomas Monjalon
2015-03-03 16:27, xuelin.shi at freescale.com:
> From: Xuelin Shi 
> 
> enforce byte-order rules for data exchanged between the cpu and ixgbe:
> 1. data the cpu reads from ixgbe must go through rte_le_to_cpu_xx(...)
> 2. data the cpu writes for ixgbe must go through rte_cpu_to_le_xx(...)
> 
> Signed-off-by: Xuelin Shi 

Please Xuelin, could you rebase on HEAD and fix these checkpatch errors?

ERROR:SPACING: space prohibited after that '!' (ctx:BxW)

ERROR:CODE_INDENT: code indent should use tabs where possible
+^I^I ^I   ^I  IXGBE_RXDADV_STAT_DD)) {$

Thanks
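
Both checkpatch complaints are mechanical style fixes. A minimal standalone
sketch of the form checkpatch accepts (the mask values below are local
stand-ins so the snippet compiles on its own; in the driver they come from
the ixgbe headers):

#include <stdint.h>

#define ADVTXD_STAT_DD	0x00000001	/* local stand-in for IXGBE_ADVTXD_STAT_DD */
#define RXDADV_STAT_DD	0x00000001	/* local stand-in for IXGBE_RXDADV_STAT_DD */

static int
desc_done(uint32_t tx_status, uint32_t rx_status_error)
{
	/* SPACING fix: no blank between '!' and the expression */
	if (!(tx_status & ADVTXD_STAT_DD))
		return 0;

	/* CODE_INDENT fix: the wrapped condition continues with tabs only,
	 * not a mix of tabs and spaces */
	if (!(rx_status_error &
			RXDADV_STAT_DD))
		return 0;

	return 1;
}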


[dpdk-dev] [PATCH] ixgbe: fix data access on big endian cpu

2015-03-03 Thread xuelin....@freescale.com
From: Xuelin Shi 

enforce byte-order rules for data exchanged between the cpu and ixgbe:
1. data the cpu reads from ixgbe must go through rte_le_to_cpu_xx(...)
2. data the cpu writes for ixgbe must go through rte_cpu_to_le_xx(...)
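
The two rules above amount to wrapping every device-visible field access in
DPDK's byte-order helpers, as in this minimal sketch (the struct below is a
simplified stand-in for a descriptor, not the driver's real
ixgbe_adv_tx_desc layout):

#include <stdint.h>
#include <rte_byteorder.h>

/* simplified stand-in for a little-endian NIC descriptor */
struct le_desc {
	uint64_t buffer_addr;	/* filled by the cpu, read by the NIC */
	uint32_t status;	/* written back by the NIC */
};

/* rule 2: data the cpu writes for the NIC goes through rte_cpu_to_le_xx() */
static void
fill_desc(struct le_desc *d, uint64_t dma_addr)
{
	d->buffer_addr = rte_cpu_to_le_64(dma_addr);
}

/* rule 1: data the cpu reads back from the NIC goes through
 * rte_le_to_cpu_xx(); this swaps on big endian cpus and compiles
 * away on little endian ones */
static int
desc_done(const struct le_desc *d, uint32_t dd_mask)
{
	return (rte_le_to_cpu_32(d->status) & dd_mask) != 0;
}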

Signed-off-by: Xuelin Shi 
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 115 ++++++++++++++++++++++++--------------
 1 file changed, 72 insertions(+), 43 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e6766b3..fb01a4a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -140,7 +140,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
int i;

/* check DD bit on threshold descriptor */
-   status = txq->tx_ring[txq->tx_next_dd].wb.status;
+   status = rte_le_to_cpu_32(txq->tx_ring[txq->tx_next_dd].wb.status);
if (! (status & IXGBE_ADVTXD_STAT_DD))
return 0;

@@ -186,11 +186,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
pkt_len = (*pkts)->data_len;

/* write data to descriptor */
-   txdp->read.buffer_addr = buf_dma_addr;
+   txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
-   ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+   rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
-   (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+   rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
}
 }
@@ -206,11 +209,14 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
pkt_len = (*pkts)->data_len;

/* write data to descriptor */
-   txdp->read.buffer_addr = buf_dma_addr;
+   txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
-   ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+   rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
-   (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+   rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
 }

@@ -297,7 +303,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 * a divisor of the ring size
 */
tx_r[txq->tx_next_rs].read.cmd_type_len |=
-   rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+   rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

txq->tx_tail = 0;
@@ -316,7 +322,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 */
if (txq->tx_tail > txq->tx_next_rs) {
tx_r[txq->tx_next_rs].read.cmd_type_len |=
-   rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+   rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
txq->tx_rs_thresh);
if (txq->tx_next_rs >= txq->nb_tx_desc)
@@ -517,6 +523,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
+   uint32_t stat;

/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -525,7 +532,9 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)

/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+
+   stat = rte_le_to_cpu_32(txr[desc_to_clean_to].wb.status);
+   if (! (stat & IXGBE_TXD_STAT_DD))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
@@ -556,7 +565,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
 * up to the last descriptor with the RS bit set
 * are done. Only reset the threshold descriptor.
 */
-   txr[desc_to_clean_to].wb.status = 0;
+   txr[desc_to_clean_to].wb.status = rte_cpu_to_le_32(0);

/* Update the txq to reflect the last descriptor that was cleaned */
txq->last_desc_cleaned = desc_to_clean_to;
@@ -813,12 +822,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 */
slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
txd->read.buffer_addr =
-   rte_cpu_to_le_64(buf_dma_addr);
+   rte_cpu_to_le_64(buf_dma_addr);