Re: [PATCH 4/6] net: mvneta: Handle per-cpu interrupts

2015-07-06 Thread Maxime Ripard
Hi Willy,

On Sun, Jul 05, 2015 at 02:37:08PM +0200, Willy Tarreau wrote:
 Hi Maxime,
 
 On Fri, Jul 03, 2015 at 04:25:49PM +0200, Maxime Ripard wrote:
  Now that our interrupt controller is allowing us to use per-CPU interrupts,
  actually use it in the mvneta driver.
  
  This involves obviously reworking the driver to have a CPU-local NAPI
  structure, and report for incoming packet using that structure.
  
  Signed-off-by: Maxime Ripard maxime.rip...@free-electrons.com
 
 This patch breaks module build of mvneta unless you export request_percpu_irq 
 :
 
 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
 index ec31697..1440a92 100644
 --- a/kernel/irq/manage.c
 +++ b/kernel/irq/manage.c
 @@ -1799,6 +1799,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t 
 handler,
  
   return retval;
  }
 +EXPORT_SYMBOL_GPL(request_percpu_irq);

Ah, right. Thanks!

Maxime

-- 
Maxime Ripard, Free Electrons
Embedded Linux, Kernel and Android engineering
http://free-electrons.com


signature.asc
Description: Digital signature


Re: [PATCH 4/6] net: mvneta: Handle per-cpu interrupts

2015-07-05 Thread Willy Tarreau
Hi Maxime,

On Fri, Jul 03, 2015 at 04:25:49PM +0200, Maxime Ripard wrote:
 Now that our interrupt controller is allowing us to use per-CPU interrupts,
 actually use it in the mvneta driver.
 
 This involves obviously reworking the driver to have a CPU-local NAPI
 structure, and report for incoming packet using that structure.
 
 Signed-off-by: Maxime Ripard maxime.rip...@free-electrons.com

This patch breaks module build of mvneta unless you export request_percpu_irq :

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ec31697..1440a92 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1799,6 +1799,7 @@ int request_percpu_irq(unsigned int irq, irq_handler_t 
handler,
 
return retval;
 }
+EXPORT_SYMBOL_GPL(request_percpu_irq);
 
Regards,
Willy

--
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 4/6] net: mvneta: Handle per-cpu interrupts

2015-07-03 Thread Maxime Ripard
Now that our interrupt controller is allowing us to use per-CPU interrupts,
actually use it in the mvneta driver.

This involves obviously reworking the driver to have a CPU-local NAPI
structure, and report for incoming packet using that structure.

Signed-off-by: Maxime Ripard maxime.rip...@free-electrons.com
---
 drivers/net/ethernet/marvell/mvneta.c | 85 ---
 1 file changed, 58 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c 
b/drivers/net/ethernet/marvell/mvneta.c
index b7717375ec4d..aedd21ed9532 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -285,7 +285,21 @@ struct mvneta_pcpu_stats {
u64 tx_bytes;
 };
 
+struct mvneta_pcpu_port {
+   /* Pointer to the shared port */
+   struct mvneta_port  *pp;
+
+   /* Pointer to the CPU-local NAPI struct */
+   struct napi_struct  napi;
+
+   /* Cause of the previous interrupt */
+   u32 cause_rx_tx;
+};
+
 struct mvneta_port {
+   struct mvneta_pcpu_port __percpu	*ports;
+   struct mvneta_pcpu_stats __percpu	*stats;
+
int pkt_size;
unsigned int frag_size;
void __iomem *base;
@@ -293,15 +307,11 @@ struct mvneta_port {
struct mvneta_tx_queue *txqs;
struct net_device *dev;
 
-   u32 cause_rx_tx;
-   struct napi_struct napi;
-
/* Core clock */
struct clk *clk;
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
-   struct mvneta_pcpu_stats *stats;
 
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -1454,6 +1464,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 struct mvneta_rx_queue *rxq)
 {
+   struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
int rx_done, rx_filled;
u32 rcvd_pkts = 0;
@@ -1508,7 +1519,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 
	skb->protocol = eth_type_trans(skb, dev);
	mvneta_rx_csum(pp, rx_status, skb);
-   napi_gro_receive(pp->napi, skb);
+   napi_gro_receive(port->napi, skb);
 
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -1535,7 +1546,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 
mvneta_rx_csum(pp, rx_status, skb);
 
-   napi_gro_receive(pp->napi, skb);
+   napi_gro_receive(port->napi, skb);
 
/* Refill processing */
err = mvneta_rx_refill(pp, rx_desc);
@@ -2054,12 +2065,11 @@ static void mvneta_set_rx_mode(struct net_device *dev)
 /* Interrupt handling - the callback for request_irq() */
 static irqreturn_t mvneta_isr(int irq, void *dev_id)
 {
-   struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+   struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
 
-   /* Mask all interrupts */
-   mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+   disable_percpu_irq(port->pp->dev->irq);

-   napi_schedule(pp->napi);
+   napi_schedule(port->napi);
 
return IRQ_HANDLED;
 }
@@ -2097,11 +2107,11 @@ static int mvneta_poll(struct napi_struct *napi, int 
budget)
 {
int rx_done = 0;
u32 cause_rx_tx;
-   unsigned long flags;
struct mvneta_port *pp = netdev_priv(napi-dev);
+   struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
-   napi_complete(napi);
+   napi_complete(port->napi);
return rx_done;
}
 
@@ -2128,7 +2138,7 @@ static int mvneta_poll(struct napi_struct *napi, int 
budget)
/* For the case where the last mvneta_poll did not process all
 * RX packets
 */
-   cause_rx_tx |= pp->cause_rx_tx;
+   cause_rx_tx |= port->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) 
		{
int count;
@@ -2159,16 +2169,11 @@ static int mvneta_poll(struct napi_struct *napi, int 
budget)
 
	if (budget > 0) {
cause_rx_tx = 0;
-   napi_complete(napi);
-   local_irq_save(flags);
-   mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-   MVNETA_RX_INTR_MASK(rxq_number) |
-   MVNETA_TX_INTR_MASK(txq_number) |
-   MVNETA_MISCINTR_INTR_MASK);
-   local_irq_restore(flags);
+   napi_complete(port->napi);
+   enable_percpu_irq(pp->dev->irq, 0);
}
 
-   pp->cause_rx_tx = cause_rx_tx;
+   port->cause_rx_tx = cause_rx_tx;
return rx_done;
 }
 
@@ -2417,6 +2422,8 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 
 static void