Re: [PATCH 1/2] net: mvneta: split rxq/txq init into SW and HW parts

2018-03-30 Thread Jisheng Zhang
Hi,

On Thu, 29 Mar 2018 13:42:59 +0200 Thomas Petazzoni wrote:

> Hello,
> 
> On Thu, 29 Mar 2018 18:13:56 +0800, Jisheng Zhang wrote:
> > This prepares for the suspend/resume improvement in the next patch: the
> > SW parts can be skipped during resume.
> > 
> > Signed-off-by: Jisheng Zhang   
> 
> Thanks, I have two very minor nits below, but otherwise:
> 
> Acked-by: Thomas Petazzoni 

Thanks for reviewing.

> 
> > +/* Create a specified RX queue */
> > +static int mvneta_rxq_init(struct mvneta_port *pp,
> > +  struct mvneta_rx_queue *rxq)
> > +
> > +{
> > +   int ret;
> > +
> > +   ret = mvneta_rxq_sw_init(pp, rxq);
> > +   if (ret)  
> 
> Here you're testing if (ret), while in mvneta_txq_init(), in the same
> situation, you're doing if (ret < 0). I don't have a preference for one
> or the other, but having them consistent between the two places would
> be nice.

updated in v2.

> 
> > -/* Create and initialize a tx queue */
> > -static int mvneta_txq_init(struct mvneta_port *pp,
> > -  struct mvneta_tx_queue *txq)
> > +static int mvneta_txq_sw_init(struct mvneta_port *pp,
> > + struct mvneta_tx_queue *txq)
> >  {
> > int cpu;
> >  
> > @@ -2872,7 +2889,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
> > txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
> > txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
> >  
> > -  
> 
> Spurious change.

There's an extra blank line here, so I removed it ;)

Thanks
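
For anyone skimming the if (ret) / if (ret < 0) point above, here is a
tiny standalone sketch of the two check styles. This is an illustration
only, not code from the patch, and the helper names are made up:

static int sw_init(void)
{
	/* returns 0 on success or a negative errno on failure */
	return 0;
}

static int queue_init(void)
{
	int ret = sw_init();

	/*
	 * When sw_init() only ever returns 0 or a negative errno, the
	 * two checks below behave identically; the review comment is
	 * only about picking one style for both the rxq and txq paths.
	 */
	if (ret)		/* style used in mvneta_rxq_init() */
		return ret;
	/* if (ret < 0)		alternative style used in mvneta_txq_init() */

	return 0;
}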


Re: [PATCH 1/2] net: mvneta: split rxq/txq init into SW and HW parts

2018-03-29 Thread Thomas Petazzoni
Hello,

On Thu, 29 Mar 2018 18:13:56 +0800, Jisheng Zhang wrote:
> This prepares for the suspend/resume improvement in the next patch: the
> SW parts can be skipped during resume.
> 
> Signed-off-by: Jisheng Zhang 

Thanks, I have two very minor nits below, but otherwise:

Acked-by: Thomas Petazzoni 

> +/* Create a specified RX queue */
> +static int mvneta_rxq_init(struct mvneta_port *pp,
> +struct mvneta_rx_queue *rxq)
> +
> +{
> + int ret;
> +
> + ret = mvneta_rxq_sw_init(pp, rxq);
> + if (ret)

Here you're testing if (ret), while in mvneta_txq_init(), in the same
situation, you're doing if (ret < 0). I don't have a preference for one
or the other, but having them consistent between the two places would
be nice.

> -/* Create and initialize a tx queue */
> -static int mvneta_txq_init(struct mvneta_port *pp,
> -struct mvneta_tx_queue *txq)
> +static int mvneta_txq_sw_init(struct mvneta_port *pp,
> +   struct mvneta_tx_queue *txq)
>  {
>   int cpu;
>  
> @@ -2872,7 +2889,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
>   txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
>   txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
>  
> -

Spurious change.

Thanks!

Thomas Petazzoni
-- 
Thomas Petazzoni, CTO, Bootlin (formerly Free Electrons)
Embedded Linux and Kernel engineering
https://bootlin.com


[PATCH 1/2] net: mvneta: split rxq/txq init into SW and HW parts

2018-03-29 Thread Jisheng Zhang
This prepares for the suspend/resume improvement in the next patch: the
SW parts can be skipped during resume.

Signed-off-by: Jisheng Zhang 
---
 drivers/net/ethernet/marvell/mvneta.c | 70 ++-
 1 file changed, 53 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 30aab9bf77cc..4ec69bbd1eb4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2796,10 +2796,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
 
 /* Rx/Tx queue initialization/cleanup methods */
 
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
-  struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
 {
rxq->size = pp->rx_ring_size;
 
@@ -2812,6 +2810,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
rxq->last_desc = rxq->size - 1;
 
+   return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+  struct mvneta_rx_queue *rxq)
+{
/* Set Rx descriptors queue starting address */
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2835,6 +2839,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
mvneta_rxq_short_pool_set(pp, rxq);
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
}
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+  struct mvneta_rx_queue *rxq)
+
+{
+   int ret;
+
+   ret = mvneta_rxq_sw_init(pp, rxq);
+   if (ret)
+   return ret;
+
+   mvneta_rxq_hw_init(pp, rxq);
 
return 0;
 }
@@ -2857,9 +2875,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
rxq->descs_phys= 0;
 }
 
-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
-  struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
 {
int cpu;
 
@@ -2872,7 +2889,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
 
-
/* Allocate memory for TX descriptors */
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2882,14 +2898,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 
txq->last_desc = txq->size - 1;
 
-   /* Set maximum bandwidth for enabled TXQs */
-   mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ff);
-   mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fff);
-
-   /* Set Tx descriptors queue starting address */
-   mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
-   mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
GFP_KERNEL);
if (!txq->tx_skb) {
@@ -2910,7 +2918,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
  txq->descs, txq->descs_phys);
return -ENOMEM;
}
-   mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
/* Setup XPS mapping */
if (txq_number > 1)
@@ -2923,6 +2930,35 @@ static int mvneta_txq_init(struct mvneta_port *pp,
return 0;
 }
 
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+  struct mvneta_tx_queue *txq)
+{
+   /* Set maximum bandwidth for enabled TXQs */
+   mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ff);
+   mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fff);
+
+   /* Set Tx descriptors queue starting address */
+   mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+   mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+   mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+  struct mvneta_tx_queue *txq)
+{
+   int ret;
+
+   ret = mvneta_txq_sw_init(pp, txq);
+   if (ret < 0)
+   return ret;
+
+   mvneta_txq_hw_init(pp, txq);
+
+   return 0;
+}
+
 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
 static void mvneta_txq_deinit(struct mvneta_port *pp,
  struct mvneta_tx_queue *txq)
-- 
2.16.3
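
As background for why the split matters, here is a rough sketch of how the
new *_hw_init() helpers could be reused on resume. This is an illustration
only, not the actual 2/2 patch: the function name mvneta_restart_queues()
is invented, and it assumes the descriptor rings and skb arrays set up by
the *_sw_init() helpers stay allocated across suspend (pp->rxqs/pp->txqs
and rxq_number/txq_number follow the driver's existing naming).

static void mvneta_restart_queues(struct mvneta_port *pp)
{
	int queue;

	/*
	 * The SW state (descriptor rings, tx_skb array, thresholds)
	 * allocated at ndo_open time is still valid after a resume,
	 * so only the register programming needs to be redone.
	 */
	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_hw_init(pp, &pp->rxqs[queue]);

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_hw_init(pp, &pp->txqs[queue]);
}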