[PATCHv2 2/3] net: velocity: Convert to generic dma functions

2013-04-29 Thread Tony Prisk
Remove the pci_* DMA functions and replace them with the more
generic dma_* versions.
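
The pci_* helpers are thin compatibility wrappers around the generic
dma_* API operating on &pdev->dev, so every call site converts
one-to-one. A minimal sketch of the mapping applied throughout this
patch (following include/asm-generic/pci-dma-compat.h, which is also
where the GFP_ATOMIC flag used below comes from):

	/*
	 * pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE)
	 *   -> dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE)
	 * pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE)
	 *   -> dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE)
	 * pci_alloc_consistent(pdev, size, &handle)
	 *   -> dma_alloc_coherent(&pdev->dev, size, &handle, GFP_ATOMIC)
	 * pci_free_consistent(pdev, size, vaddr, handle)
	 *   -> dma_free_coherent(&pdev->dev, size, vaddr, handle)
	 */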

In preparation for adding platform support, a new struct device *dev
is added to struct velocity_info, which can be used by both the PCI
and platform code.
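
A sketch of the intended shape, assuming the single line added to
via-velocity.h is the new member (that hunk is not shown below);
pdev_platform is an illustrative name only:

	struct velocity_info {
		struct device *dev;	/* generic device, used for DMA */
		struct pci_dev *pdev;	/* PCI-specific state stays here */
		/* ... existing members ... */
	};

	/* PCI probe path: */
	vptr->dev = &pdev->dev;

	/* future platform probe path (pdev_platform is hypothetical): */
	vptr->dev = &pdev_platform->dev;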

Signed-off-by: Tony Prisk <li...@prisktech.co.nz>
---
 drivers/net/ethernet/via/via-velocity.c |   51 +++
 drivers/net/ethernet/via/via-velocity.h |    1 +
 2 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 9a408b1..e2c4887 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -46,6 +46,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -1457,7 +1458,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
struct velocity_opt *opt = &vptr->options;
const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
-   struct pci_dev *pdev = vptr->pdev;
dma_addr_t pool_dma;
void *pool;
unsigned int i;
@@ -1465,13 +1465,13 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
/*
 * Allocate all RD/TD rings a single pool.
 *
-* pci_alloc_consistent() fulfills the requirement for 64 bytes
+* dma_alloc_coherent() fulfills the requirement for 64 bytes
 * alignment
 */
-   pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
-   rx_ring_size, &pool_dma);
+   pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
+   rx_ring_size, &pool_dma, GFP_ATOMIC);
if (!pool) {
-   dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
+   dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
vptr->netdev->name);
return -ENOMEM;
}
@@ -1522,8 +1522,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 */
skb_reserve(rd_info->skb,
64 - ((unsigned long) rd_info->skb->data & 63));
-   rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
-   vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+   rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
+   vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
/*
 *  Fill in the descriptor to match
@@ -1586,8 +1586,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
if (!rd_info->skb)
continue;
-   pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-PCI_DMA_FROMDEVICE);
+   dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+DMA_FROM_DEVICE);
rd_info->skb_dma = 0;
 
dev_kfree_skb(rd_info->skb);
@@ -1668,7 +1668,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
const int size = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
-   pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+   dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
@@ -1725,8 +1725,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
pktlen = max_t(size_t, pktlen,
td->td_buf[i].size & ~TD_QUEUE);
 
-   pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
-   le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
+   dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+   le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
}
dev_kfree_skb_irq(skb);
@@ -1748,8 +1748,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
if (td_info->skb) {
for (i = 0; i < td_info->nskb_dma; i++) {
if (td_info->skb_dma[i]) {
-   pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
-   td_info->skb->len, PCI_DMA_TODEVICE);
+   dma_unmap_single(vptr->dev, td_info->skb_dma[i],
+   td_info->skb->len, DMA_TO_DEVICE);
td_info->skb_dma[i] = 0;
}
}
@@ -2027,7 +2027,6 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
  */
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
-   void 

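For completeness, the streaming-DMA pattern the RX path ends up with
after this conversion. This is a sketch, not part of the patch: the
dma_mapping_error() check is how the generic API reports a failed
mapping, while this patch keeps the driver's original unchecked
behaviour:

	dma_addr_t mapping;

	mapping = dma_map_single(vptr->dev, rd_info->skb->data,
				 vptr->rx.buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(vptr->dev, mapping))
		return -ENOMEM;			/* sketch only */
	rd_info->skb_dma = mapping;

	/* ... and on teardown: */
	dma_unmap_single(vptr->dev, rd_info->skb_dma,
			 vptr->rx.buf_sz, DMA_FROM_DEVICE);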