[PATCH 3.16 165/366] xen-netfront: use different locks for Rx and Tx stats

2018-11-11 Thread Ben Hutchings
3.16.61-rc1 review patch.  If anyone has any objections, please let me know.

--

From: David Vrabel 

commit 900e183301b54f8ca17a86d9835e9569090d182a upstream.

In netfront the Rx and Tx path are independent and use different
locks.  The Tx lock is held with hard irqs disabled, but Rx lock is
held with only BH disabled.  Since both sides use the same stats lock,
a deadlock may occur.

  [ INFO: possible irq lock inversion dependency detected ]
  3.16.2 #16 Not tainted
  -
  swapper/0/0 just changed the state of lock:
   (&(&queue->tx_lock)->rlock){-.....}, at: [<...>]
  xennet_tx_interrupt+0x14/0x34
  but this lock took another, HARDIRQ-unsafe lock in the past:
   (&stat->syncp.seq#2){+.-...}
  and interrupts could create inverse lock ordering between them.
  other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

 CPU0CPU1
 
lock(&stat->syncp.seq#2);
 local_irq_disable();
 lock(&(&queue->tx_lock)->rlock);
 lock(&stat->syncp.seq#2);
  <Interrupt>
  lock(&(&queue->tx_lock)->rlock);

Using separate locks for the Rx and Tx stats fixes this deadlock.

Reported-by: Dmitry Piotrovsky 
Signed-off-by: David Vrabel 
Signed-off-by: David S. Miller 
Signed-off-by: Ben Hutchings 
---
 drivers/net/xen-netfront.c | 71 ++
 1 file changed, 42 insertions(+), 29 deletions(-)

--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -86,10 +86,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-   u64 rx_packets;
-   u64 tx_packets;
-   u64 rx_bytes;
-   u64 tx_bytes;
+   u64 packets;
+   u64 bytes;
struct u64_stats_sync   syncp;
 };
 
@@ -165,7 +163,8 @@ struct netfront_info {
struct netfront_queue *queues;
 
/* Statistics */
-   struct netfront_stats __percpu *stats;
+   struct netfront_stats __percpu *rx_stats;
+   struct netfront_stats __percpu *tx_stats;
 
atomic_t rx_gso_checksum_fixup;
 };
@@ -588,7 +587,7 @@ static int xennet_start_xmit(struct sk_b
 {
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
-   struct netfront_stats *stats = this_cpu_ptr(np->stats);
+   struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *tx;
char *data = skb->data;
RING_IDX i;
@@ -695,10 +694,10 @@ static int xennet_start_xmit(struct sk_b
if (notify)
notify_remote_via_irq(queue->tx_irq);
 
-   u64_stats_update_begin(&stats->syncp);
-   stats->tx_bytes += skb->len;
-   stats->tx_packets++;
-   u64_stats_update_end(&stats->syncp);
+   u64_stats_update_begin(&tx_stats->syncp);
+   tx_stats->bytes += skb->len;
+   tx_stats->packets++;
+   u64_stats_update_end(&tx_stats->syncp);
 
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
@@ -954,7 +953,7 @@ static int checksum_setup(struct net_dev
 static int handle_incoming_queue(struct netfront_queue *queue,
 struct sk_buff_head *rxq)
 {
-   struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+   struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0;
struct sk_buff *skb;
 
@@ -975,10 +974,10 @@ static int handle_incoming_queue(struct
continue;
}
 
-   u64_stats_update_begin(&stats->syncp);
-   stats->rx_packets++;
-   stats->rx_bytes += skb->len;
-   u64_stats_update_end(&stats->syncp);
+   u64_stats_update_begin(&rx_stats->syncp);
+   rx_stats->packets++;
+   rx_stats->bytes += skb->len;
+   u64_stats_update_end(&rx_stats->syncp);
 
/* Pass it up. */
napi_gro_receive(&queue->napi, skb);
@@ -1113,18 +1112,22 @@ static struct rtnl_link_stats64 *xennet_
int cpu;
 
for_each_possible_cpu(cpu) {
-   struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+   struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+   struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
 
do {
-   start = u64_stats_fetch_begin_irq(&stats->syncp);
+   start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+   tx_packets = tx_stats->packets;
+   tx_bytes = tx_stats->bytes;
+   } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

[PATCH 3.16 165/366] xen-netfront: use different locks for Rx and Tx stats

2018-11-11 Thread Ben Hutchings
3.16.61-rc1 review patch.  If anyone has any objections, please let me know.

--

From: David Vrabel 

commit 900e183301b54f8ca17a86d9835e9569090d182a upstream.

In netfront the Rx and Tx path are independent and use different
locks.  The Tx lock is held with hard irqs disabled, but Rx lock is
held with only BH disabled.  Since both sides use the same stats lock,
a deadlock may occur.

  [ INFO: possible irq lock inversion dependency detected ]
  3.16.2 #16 Not tainted
  -
  swapper/0/0 just changed the state of lock:
   (&(&queue->tx_lock)->rlock){-.....}, at: [<...>]
  xennet_tx_interrupt+0x14/0x34
  but this lock took another, HARDIRQ-unsafe lock in the past:
   (&stat->syncp.seq#2){+.-...}
  and interrupts could create inverse lock ordering between them.
  other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

 CPU0CPU1
 
lock(&stat->syncp.seq#2);
 local_irq_disable();
 lock(&(&queue->tx_lock)->rlock);
 lock(&stat->syncp.seq#2);
  <Interrupt>
  lock(&(&queue->tx_lock)->rlock);

Using separate locks for the Rx and Tx stats fixes this deadlock.

Reported-by: Dmitry Piotrovsky 
Signed-off-by: David Vrabel 
Signed-off-by: David S. Miller 
Signed-off-by: Ben Hutchings 
---
 drivers/net/xen-netfront.c | 71 ++
 1 file changed, 42 insertions(+), 29 deletions(-)

--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -86,10 +86,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-   u64 rx_packets;
-   u64 tx_packets;
-   u64 rx_bytes;
-   u64 tx_bytes;
+   u64 packets;
+   u64 bytes;
struct u64_stats_sync   syncp;
 };
 
@@ -165,7 +163,8 @@ struct netfront_info {
struct netfront_queue *queues;
 
/* Statistics */
-   struct netfront_stats __percpu *stats;
+   struct netfront_stats __percpu *rx_stats;
+   struct netfront_stats __percpu *tx_stats;
 
atomic_t rx_gso_checksum_fixup;
 };
@@ -588,7 +587,7 @@ static int xennet_start_xmit(struct sk_b
 {
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
-   struct netfront_stats *stats = this_cpu_ptr(np->stats);
+   struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *tx;
char *data = skb->data;
RING_IDX i;
@@ -695,10 +694,10 @@ static int xennet_start_xmit(struct sk_b
if (notify)
notify_remote_via_irq(queue->tx_irq);
 
-   u64_stats_update_begin(&stats->syncp);
-   stats->tx_bytes += skb->len;
-   stats->tx_packets++;
-   u64_stats_update_end(&stats->syncp);
+   u64_stats_update_begin(&tx_stats->syncp);
+   tx_stats->bytes += skb->len;
+   tx_stats->packets++;
+   u64_stats_update_end(&tx_stats->syncp);
 
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
@@ -954,7 +953,7 @@ static int checksum_setup(struct net_dev
 static int handle_incoming_queue(struct netfront_queue *queue,
 struct sk_buff_head *rxq)
 {
-   struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+   struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0;
struct sk_buff *skb;
 
@@ -975,10 +974,10 @@ static int handle_incoming_queue(struct
continue;
}
 
-   u64_stats_update_begin(&stats->syncp);
-   stats->rx_packets++;
-   stats->rx_bytes += skb->len;
-   u64_stats_update_end(&stats->syncp);
+   u64_stats_update_begin(&rx_stats->syncp);
+   rx_stats->packets++;
+   rx_stats->bytes += skb->len;
+   u64_stats_update_end(&rx_stats->syncp);
 
/* Pass it up. */
napi_gro_receive(&queue->napi, skb);
@@ -1113,18 +1112,22 @@ static struct rtnl_link_stats64 *xennet_
int cpu;
 
for_each_possible_cpu(cpu) {
-   struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+   struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+   struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
 
do {
-   start = u64_stats_fetch_begin_irq(&stats->syncp);
+   start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+   tx_packets = tx_stats->packets;
+   tx_bytes = tx_stats->bytes;
+   } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));