Re: [PATCH 2/2] drivers: net: xgene: Clean up all outstanding tx descriptors

2017-08-28 Thread Iyappan Subramanian
Hi Andrew,

On Fri, Aug 25, 2017 at 4:10 PM, Andrew Lunn  wrote:
> On Fri, Aug 25, 2017 at 03:23:30PM -0700, Iyappan Subramanian wrote:
>> When xgene_enet is rmmod'd and there are still outstanding tx descriptors
>> that have been setup but have not completed, it is possible on the next
>> modprobe of the driver to receive the oldest of such tx descriptors. This
>> results in a kernel NULL pointer dereference.
>>
>> This patch attempts to clean up (by tearing down) all outstanding tx
>> descriptors when the xgene_enet driver is being rmmod'd.
>>
>> Given that, on the next modprobe it should be safe to ignore any such tx
>> descriptors received that map to a NULL skb pointer.
>
> This does not sound correct. Before the module is allowed to be
> removed, everything needs to be finished. You need to wait for all the
> tx descriptors to be returned before unloading. How can you free the
> memory for the descriptor if it is still in use? How can you free the
> skbuf the descriptor points to, if it is still in use...

Thanks for pointing out the issue.  It is an error, we will fix the issue.

Since the two patches are unrelated, I'm going to post them separately.

Thanks,
Iyappan

>
>   Andrew


Re: [PATCH 2/2] drivers: net: xgene: Clean up all outstanding tx descriptors

2017-08-25 Thread Andrew Lunn
On Fri, Aug 25, 2017 at 03:23:30PM -0700, Iyappan Subramanian wrote:
> When xgene_enet is rmmod'd and there are still outstanding tx descriptors
> that have been setup but have not completed, it is possible on the next
> modprobe of the driver to receive the oldest of such tx descriptors. This
> results in a kernel NULL pointer dereference.
> 
> This patch attempts to clean up (by tearing down) all outstanding tx
> descriptors when the xgene_enet driver is being rmmod'd.
> 
> Given that, on the next modprobe it should be safe to ignore any such tx
> descriptors received that map to a NULL skb pointer.

This does not sound correct. Before the module is allowed to be
removed, everything needs to be finished. You need to wait for all the
tx descriptors to be returned before unloading. How can you free the
memory for the descriptor if it is still in use? How can you free the
skbuf the descriptor points to, if it is still in use... 

  Andrew


[PATCH 2/2] drivers: net: xgene: Clean up all outstanding tx descriptors

2017-08-25 Thread Iyappan Subramanian
When xgene_enet is rmmod'd and there are still outstanding tx descriptors
that have been setup but have not completed, it is possible on the next
modprobe of the driver to receive the oldest of such tx descriptors. This
results in a kernel NULL pointer dereference.

This patch attempts to clean up (by tearing down) all outstanding tx
descriptors when the xgene_enet driver is being rmmod'd.

Given that, on the next modprobe it should be safe to ignore any such tx
descriptors received that map to a NULL skb pointer.

Additionally this patch removes redundant call to dev_kfree_skb_any() from
xgene_enet_setup_tx_desc(). The only caller of xgene_enet_setup_tx_desc()
will call dev_kfree_skb_any() upon return of an error. Nothing is gained by
calling it twice in a row.

Signed-off-by: Iyappan Subramanian 
Signed-off-by: Dean Nelson 
Tested-by: Quan Nguyen 
---
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 120 +--
 1 file changed, 89 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 6e253d9..76e2903 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -237,22 +237,24 @@ static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
return IRQ_HANDLED;
 }
 
-static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
-   struct xgene_enet_raw_desc *raw_desc)
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring,
+   u16 skb_index)
 {
-   struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
-   struct sk_buff *skb;
+   return &ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
+}
+
+static void xgene_enet_teardown_tx_desc(struct xgene_enet_desc_ring *cp_ring,
+   struct xgene_enet_raw_desc *raw_desc,
+   struct xgene_enet_raw_desc *exp_desc,
+   struct sk_buff *skb,
+   u16 skb_index)
+{
+   dma_addr_t dma_addr, *frag_dma_addr;
struct device *dev;
skb_frag_t *frag;
-   dma_addr_t *frag_dma_addr;
-   u16 skb_index;
-   u8 mss_index;
-   u8 status;
int i;
 
-   skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
-   skb = cp_ring->cp_skb[skb_index];
-   frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
+   frag_dma_addr = xgene_get_frag_dma_array(cp_ring, skb_index);
 
dev = ndev_to_dev(cp_ring->ndev);
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
@@ -265,6 +267,36 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
   DMA_TO_DEVICE);
}
 
+   if (exp_desc && GET_VAL(LL_BYTES_LSB, le64_to_cpu(raw_desc->m2))) {
+   dma_addr = GET_VAL(DATAADDR, le64_to_cpu(exp_desc->m2));
+   dma_unmap_single(dev, dma_addr, sizeof(u64) * MAX_EXP_BUFFS,
+DMA_TO_DEVICE);
+   }
+
+   dev_kfree_skb_any(skb);
+}
+
+static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
+   struct xgene_enet_raw_desc *raw_desc,
+   struct xgene_enet_raw_desc *exp_desc)
+{
+   struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
+   struct sk_buff *skb;
+   u16 skb_index;
+   u8 status;
+   u8 mss_index;
+
+   skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
+   skb = cp_ring->cp_skb[skb_index];
+   if (unlikely(!skb)) {
+   netdev_err(cp_ring->ndev, "completion skb is NULL\n");
+   return -EIO;
+   }
+   cp_ring->cp_skb[skb_index] = NULL;
+
+   xgene_enet_teardown_tx_desc(cp_ring, raw_desc, exp_desc, skb,
+   skb_index);
+
if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
spin_lock(&pdata->mss_lock);
@@ -279,12 +311,6 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
cp_ring->tx_errors++;
}
 
-   if (likely(skb)) {
-   dev_kfree_skb_any(skb);
-   } else {
-   netdev_err(cp_ring->ndev, "completion skb is NULL\n");
-   }
-
return 0;
 }
 
@@ -412,11 +438,6 @@ static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
return exp_bufs;
 }
 
-static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
-{
-   return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
-}
-
 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
 {
@@ -473,7 +494,8