Re: [PATCH v6 net-next 2/5] net: ethernet: ti: davinci_cpdma: add dma mapped submit

2019-07-05 Thread kbuild test robot
Hi Ivan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:
https://github.com/0day-ci/linux/commits/Ivan-Khoronzhuk/xdp-allow-same-allocator-usage/20190706-003850
config: arm64-allmodconfig (attached as .config)
compiler: aarch64-linux-gcc (GCC) 7.4.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.4.0 make.cross ARCH=arm64

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot 

All warnings (new ones prefixed by >>):

   drivers/net//ethernet/ti/davinci_cpdma.c: In function 'cpdma_chan_submit_si':
>> drivers/net//ethernet/ti/davinci_cpdma.c:1047:12: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
      buffer = (u32)si->data;
               ^
   drivers/net//ethernet/ti/davinci_cpdma.c: In function 'cpdma_chan_idle_submit_mapped':
>> drivers/net//ethernet/ti/davinci_cpdma.c:1114:12: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
      si.data = (void *)(u32)data;
                ^
   drivers/net//ethernet/ti/davinci_cpdma.c: In function 'cpdma_chan_submit_mapped':
   drivers/net//ethernet/ti/davinci_cpdma.c:1164:12: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
      si.data = (void *)(u32)data;
                ^
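
(Aside, not part of the robot's report: a minimal standalone sketch of what GCC is objecting to. On arm64, void * and dma_addr_t are 64 bits wide while u32 is 32, so both casts truncate. The usual kernel idiom is to go through the pointer-sized uintptr_t; the helper names below are made up for illustration.)

#include <linux/types.h>

static inline dma_addr_t pack_warns(void *p)
{
	return (u32)p;		/* 64-bit pointer -> u32: truncates, warns */
}

static inline void *unpack_warns(dma_addr_t a)
{
	return (void *)(u32)a;	/* u32 -> 64-bit pointer: warns */
}

static inline void *unpack_quiet(dma_addr_t a)
{
	/* uintptr_t is pointer-sized on every arch, so no warning here;
	 * whether a dma_addr_t fits in a void * at all is a separate
	 * question on 32-bit systems with a 64-bit dma_addr_t
	 */
	return (void *)(uintptr_t)a;
}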

vim +1047 drivers/net//ethernet/ti/davinci_cpdma.c

  1015  
  1016  static int cpdma_chan_submit_si(struct submit_info *si)
  1017  {
  1018  struct cpdma_chan   *chan = si->chan;
  1019  struct cpdma_ctlr   *ctlr = chan->ctlr;
  1020  int len = si->len;
  1021  int swlen = len;
  1022  struct cpdma_desc __iomem   *desc;
  1023  dma_addr_t  buffer;
  1024  u32 mode;
  1025  int ret;
  1026  
  1027  if (chan->count >= chan->desc_num)  {
  1028  chan->stats.desc_alloc_fail++;
  1029  return -ENOMEM;
  1030  }
  1031  
  1032  desc = cpdma_desc_alloc(ctlr->pool);
  1033  if (!desc) {
  1034  chan->stats.desc_alloc_fail++;
  1035  return -ENOMEM;
  1036  }
  1037  
  1038  if (len < ctlr->params.min_packet_size) {
  1039  len = ctlr->params.min_packet_size;
  1040  chan->stats.runt_transmit_buff++;
  1041  }
  1042  
  1043  mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
  1044  cpdma_desc_to_port(chan, mode, si->directed);
  1045  
  1046  if (si->flags & CPDMA_DMA_EXT_MAP) {
> 1047  buffer = (u32)si->data;
  1048  dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
  1049  swlen |= CPDMA_DMA_EXT_MAP;
  1050  } else {
  1051  buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
  1052  ret = dma_mapping_error(ctlr->dev, buffer);
  1053  if (ret) {
  1054  cpdma_desc_free(ctlr->pool, desc, 1);
  1055  return -EINVAL;
  1056  }
  1057  }
  1058  
  1059  /* Relaxed IO accessors can be used here as there is read barrier
  1060   * at the end of write sequence.
  1061   */
  1062  writel_relaxed(0, &desc->hw_next);
  1063  writel_relaxed(buffer, &desc->hw_buffer);
  1064  writel_relaxed(len, &desc->hw_len);
  1065  writel_relaxed(mode | len, &desc->hw_mode);
  1066  writel_relaxed((uintptr_t)si->token, &desc->sw_token);
  1067  writel_relaxed(buffer, &desc->sw_buffer);
  1068  writel_relaxed(swlen, &desc->sw_len);
  1069  desc_read(desc, sw_len);
  1070  
  1071  __cpdma_chan_submit(chan, desc);
  1072  
  1073  if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
  1074  chan_write(chan, rxfree, 1);
  1075  
  1076  chan->count++;
  1077  return 0;
  1078  }
  1079  
  1080  int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
  1081                             int len, int directed)
  1082  {
  1083  struct submit_info si;
  1084  unsigned long flags;
  1085  int ret;
  1086  
  1087  si.chan = chan;
  1088  si.token = token;
  1089  si.data = data;
  1090  si.len = len;
  1091  si.directed = directed;
  1092  si.flags = 0;
  1093  
  1094  spin_lock_irqsave(&chan->lock, flags);
  1095  if (chan->state == CPDMA_STATE_TEARDOWN) {
  1096  spin_unlock_irqrestore(&chan->lock, flags);
  1097  return -EINVAL;
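
(Editorial context, not part of the quoted mail: the CPDMA_DMA_EXT_MAP bit stored into sw_len above is what lets the completion path decide who owns the mapping. A rough sketch of that counterpart follows; the function name is illustrative, only desc_read() and the descriptor field names are taken from the driver.)

static void chan_desc_cleanup(struct cpdma_ctlr *ctlr,
			      struct cpdma_desc __iomem *desc,
			      enum dma_data_direction dir)
{
	u32 swlen = desc_read(desc, sw_len);
	dma_addr_t buf = desc_read(desc, sw_buffer);
	int len = swlen & ~CPDMA_DMA_EXT_MAP;

	if (swlen & CPDMA_DMA_EXT_MAP)
		/* externally mapped (e.g. page_pool): owner unmaps, only sync */
		dma_sync_single_for_cpu(ctlr->dev, buf, len, dir);
	else
		/* mapped by the submit path itself: undo it here */
		dma_unmap_single(ctlr->dev, buf, len, dir);
}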

[PATCH v6 net-next 2/5] net: ethernet: ti: davinci_cpdma: add dma mapped submit

2019-07-03 Thread Ivan Khoronzhuk
If a DMA mapped packet needs to be sent, as with the XDP page pool,
the "mapped" submit can be used. This patch adds a DMA mapped submit
based on the regular one.

Signed-off-by: Ivan Khoronzhuk 
---
 drivers/net/ethernet/ti/davinci_cpdma.c | 89 ++---
 drivers/net/ethernet/ti/davinci_cpdma.h |  4 ++
 2 files changed, 83 insertions(+), 10 deletions(-)
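
(For illustration, not from the patch: the intended call pattern. A driver that already holds a DMA address, e.g. for a page_pool-backed RX buffer, passes it in directly and the submit path skips dma_map_single(). Everything except the cpdma API and page_pool_get_dma_addr() below is hypothetical, including the headroom constant.)

#include <net/page_pool.h>
#include "davinci_cpdma.h"

/* hypothetical caller: refill an RX channel with a page the pool
 * has already DMA mapped
 */
static int rx_refill_one(struct cpdma_chan *rxch, struct page *page, int buf_len)
{
	dma_addr_t dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;

	/* CPDMA_DMA_EXT_MAP is set internally: the channel only syncs
	 * the buffer for the device, it does not map or unmap it
	 */
	return cpdma_chan_idle_submit_mapped(rxch, page, dma, buf_len, 0);
}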

diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 5cf1758d425b..8da46394c0e7 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -139,6 +139,7 @@ struct submit_info {
int directed;
void *token;
void *data;
+   int flags;
int len;
 };
 
@@ -184,6 +185,8 @@ static struct cpdma_control_info controls[] = {
 (directed << CPDMA_TO_PORT_SHIFT));\
} while (0)
 
+#define CPDMA_DMA_EXT_MAP  BIT(16)
+
 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
struct cpdma_desc_pool *pool = ctlr->pool;
@@ -1015,6 +1018,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
struct cpdma_chan   *chan = si->chan;
struct cpdma_ctlr   *ctlr = chan->ctlr;
int len = si->len;
+   int swlen = len;
struct cpdma_desc __iomem   *desc;
dma_addr_t  buffer;
u32 mode;
@@ -1036,16 +1040,22 @@ static int cpdma_chan_submit_si(struct submit_info *si)
chan->stats.runt_transmit_buff++;
}
 
-   buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
-   ret = dma_mapping_error(ctlr->dev, buffer);
-   if (ret) {
-   cpdma_desc_free(ctlr->pool, desc, 1);
-   return -EINVAL;
-   }
-
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
cpdma_desc_to_port(chan, mode, si->directed);
 
+   if (si->flags & CPDMA_DMA_EXT_MAP) {
+   buffer = (u32)si->data;
+   dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+   swlen |= CPDMA_DMA_EXT_MAP;
+   } else {
+   buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
+   ret = dma_mapping_error(ctlr->dev, buffer);
+   if (ret) {
+   cpdma_desc_free(ctlr->pool, desc, 1);
+   return -EINVAL;
+   }
+   }
+
/* Relaxed IO accessors can be used here as there is read barrier
 * at the end of write sequence.
 */
@@ -1055,7 +1065,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
writel_relaxed(mode | len, &desc->hw_mode);
writel_relaxed((uintptr_t)si->token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
-   writel_relaxed(len, &desc->sw_len);
+   writel_relaxed(swlen, &desc->sw_len);
desc_read(desc, sw_len);
 
__cpdma_chan_submit(chan, desc);
@@ -1079,6 +1089,32 @@ int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
si.data = data;
si.len = len;
si.directed = directed;
+   si.flags = 0;
+
+   spin_lock_irqsave(&chan->lock, flags);
+   if (chan->state == CPDMA_STATE_TEARDOWN) {
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return -EINVAL;
+   }
+
+   ret = cpdma_chan_submit_si(&si);
+   spin_unlock_irqrestore(&chan->lock, flags);
+   return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+   struct submit_info si;
+   unsigned long flags;
+   int ret;
+
+   si.chan = chan;
+   si.token = token;
+   si.data = (void *)(u32)data;
+   si.len = len;
+   si.directed = directed;
+   si.flags = CPDMA_DMA_EXT_MAP;
 
spin_lock_irqsave(&chan->lock, flags);
if (chan->state == CPDMA_STATE_TEARDOWN) {
@@ -1103,6 +1139,32 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
si.data = data;
si.len = len;
si.directed = directed;
+   si.flags = 0;
+
+   spin_lock_irqsave(&chan->lock, flags);
+   if (chan->state != CPDMA_STATE_ACTIVE) {
+       spin_unlock_irqrestore(&chan->lock, flags);
+       return -EINVAL;
+   }
+
+   ret = cpdma_chan_submit_si(&si);
+   spin_unlock_irqrestore(&chan->lock, flags);
+   return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+dma_addr_t data, int len, int directed)
+{
+   struct submit_info si;
+   unsigned long flags;
+   int ret;
+
+   si.chan = chan;
+   si.token = token;
+   si.data = (void *)(u32)data;
+   si.len = len;
+   si.directed = directed;
+   si.flags = CPDMA_DMA_EXT_MAP;
 
spin_lock_irqsave(&chan->lock, flags);