Hi Thomas,

I love your patch! Yet something to improve:

[auto build test ERROR on v4.16-rc4]
[also build test ERROR on next-20180309]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Thomas-Falcon/ibmvnic-Fix-VLAN-and-other-device-errata/20180313-125518
config: powerpc-allmodconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=powerpc 

All errors/warnings (new ones prefixed by >>):

   drivers/net/ethernet/ibm/ibmvnic.c: In function 'ibmvnic_xmit':
>> drivers/net/ethernet/ibm/ibmvnic.c:1386:36: error: passing argument 2 of 'ibmvnic_xmit_workarounds' from incompatible pointer type [-Werror=incompatible-pointer-types]
     if (ibmvnic_xmit_workarounds(skb, adapter)) {
                                       ^~~~~~~
   drivers/net/ethernet/ibm/ibmvnic.c:1336:12: note: expected 'struct net_device *' but argument is of type 'struct ibmvnic_adapter *'
    static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
               ^~~~~~~~~~~~~~~~~~~~~~~~
   drivers/net/ethernet/ibm/ibmvnic.c: In function 'ibmvnic_xmit_workarounds':
>> drivers/net/ethernet/ibm/ibmvnic.c:1347:1: warning: control reaches end of non-void function [-Wreturn-type]
    }
    ^
   cc1: some warnings being treated as errors
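
Both diagnostics come from the new ibmvnic_xmit_workarounds() helper: the
caller hands it an ibmvnic_adapter pointer where the prototype takes a
net_device pointer, and the function falls off the end without returning a
value when no padding is needed. A minimal, untested sketch of one way to
address both (assuming the intent is to pass the netdev that ibmvnic_xmit()
already has in scope; not compile-tested against this tree):

        static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
                                            struct net_device *netdev)
        {
                /* Pad undersized packets, as the existing comment describes */
                if (skb->len < netdev->min_mtu)
                        return skb_put_padto(skb, netdev->min_mtu);

                /* Explicit success return covers the remaining control path */
                return 0;
        }

and at the call site, passing netdev instead of adapter:

        if (ibmvnic_xmit_workarounds(skb, netdev)) {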

vim +/ibmvnic_xmit_workarounds +1386 drivers/net/ethernet/ibm/ibmvnic.c

  1335  
  1336  static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
  1337                                      struct net_device *netdev)
  1338  {
  1339          /* For some backing devices, mishandling of small packets
  1340           * can result in a loss of connection or TX stall. Device
  1341           * architects recommend that no packet should be smaller
  1342           * than the minimum MTU value provided to the driver, so
  1343           * pad any packets to that length
  1344           */
  1345          if (skb->len < netdev->min_mtu)
  1346                  return skb_put_padto(skb, netdev->min_mtu);
> 1347  }
  1348  
  1349  static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
  1350  {
  1351          struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  1352          int queue_num = skb_get_queue_mapping(skb);
  1353          u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
  1354          struct device *dev = &adapter->vdev->dev;
  1355          struct ibmvnic_tx_buff *tx_buff = NULL;
  1356          struct ibmvnic_sub_crq_queue *tx_scrq;
  1357          struct ibmvnic_tx_pool *tx_pool;
  1358          unsigned int tx_send_failed = 0;
  1359          unsigned int tx_map_failed = 0;
  1360          unsigned int tx_dropped = 0;
  1361          unsigned int tx_packets = 0;
  1362          unsigned int tx_bytes = 0;
  1363          dma_addr_t data_dma_addr;
  1364          struct netdev_queue *txq;
  1365          unsigned long lpar_rc;
  1366          union sub_crq tx_crq;
  1367          unsigned int offset;
  1368          int num_entries = 1;
  1369          unsigned char *dst;
  1370          u64 *handle_array;
  1371          int index = 0;
  1372          u8 proto = 0;
  1373          int ret = 0;
  1374  
  1375          if (adapter->resetting) {
  1376                  if (!netif_subqueue_stopped(netdev, skb))
  1377                          netif_stop_subqueue(netdev, queue_num);
  1378                  dev_kfree_skb_any(skb);
  1379  
  1380                  tx_send_failed++;
  1381                  tx_dropped++;
  1382                  ret = NETDEV_TX_OK;
  1383                  goto out;
  1384          }
  1385  
> 1386          if (ibmvnic_xmit_workarounds(skb, adapter)) {
  1387                  tx_dropped++;
  1388                  tx_send_failed++;
  1389                  ret = NETDEV_TX_OK;
  1390                  goto out;
  1391          }
  1392  
  1393          tx_pool = &adapter->tx_pool[queue_num];
  1394          tx_scrq = adapter->tx_scrq[queue_num];
  1395          txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
  1396          handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
  1397                  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
  1398  
  1399          index = tx_pool->free_map[tx_pool->consumer_index];
  1400  
  1401          if (skb_is_gso(skb)) {
  1402                  offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
  1403                  dst = tx_pool->tso_ltb.buff + offset;
  1404                  memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
  1405                  data_dma_addr = tx_pool->tso_ltb.addr + offset;
  1406                  tx_pool->tso_index++;
  1407                  if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
  1408                          tx_pool->tso_index = 0;
  1409          } else {
  1410                  offset = index * (adapter->req_mtu + VLAN_HLEN);
  1411                  dst = tx_pool->long_term_buff.buff + offset;
  1412                  memset(dst, 0, adapter->req_mtu + VLAN_HLEN);
  1413                  data_dma_addr = tx_pool->long_term_buff.addr + offset;
  1414          }
  1415  
  1416          if (skb_shinfo(skb)->nr_frags) {
  1417                  int cur, i;
  1418  
  1419                  /* Copy the head */
  1420                  skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
  1421                  cur = skb_headlen(skb);
  1422  
  1423                  /* Copy the frags */
  1424                  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1425                          const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  1426  
  1427                          memcpy(dst + cur,
  1428                                 page_address(skb_frag_page(frag)) +
  1429                                 frag->page_offset, skb_frag_size(frag));
  1430                          cur += skb_frag_size(frag);
  1431                  }
  1432          } else {
  1433                  skb_copy_from_linear_data(skb, dst, skb->len);
  1434          }
  1435  
  1436          tx_pool->consumer_index =
  1437              (tx_pool->consumer_index + 1) %
  1438                  adapter->req_tx_entries_per_subcrq;
  1439  
  1440          tx_buff = &tx_pool->tx_buff[index];
  1441          tx_buff->skb = skb;
  1442          tx_buff->data_dma[0] = data_dma_addr;
  1443          tx_buff->data_len[0] = skb->len;
  1444          tx_buff->index = index;
  1445          tx_buff->pool_index = queue_num;
  1446          tx_buff->last_frag = true;
  1447  
  1448          memset(&tx_crq, 0, sizeof(tx_crq));
  1449          tx_crq.v1.first = IBMVNIC_CRQ_CMD;
  1450          tx_crq.v1.type = IBMVNIC_TX_DESC;
  1451          tx_crq.v1.n_crq_elem = 1;
  1452          tx_crq.v1.n_sge = 1;
  1453          tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
  1454          tx_crq.v1.correlator = cpu_to_be32(index);
  1455          if (skb_is_gso(skb))
  1456                  tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
  1457          else
  1458                  tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
  1459          tx_crq.v1.sge_len = cpu_to_be32(skb->len);
  1460          tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
  1461  
  1462          if (adapter->vlan_header_insertion) {
  1463                  tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
  1464                  tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
  1465          }
  1466  
  1467          if (skb->protocol == htons(ETH_P_IP)) {
  1468                  tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
  1469                  proto = ip_hdr(skb)->protocol;
  1470          } else if (skb->protocol == htons(ETH_P_IPV6)) {
  1471                  tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
  1472                  proto = ipv6_hdr(skb)->nexthdr;
  1473          }
  1474  
  1475          if (proto == IPPROTO_TCP)
  1476                  tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
  1477          else if (proto == IPPROTO_UDP)
  1478                  tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
  1479  
  1480          if (skb->ip_summed == CHECKSUM_PARTIAL) {
  1481                  tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
  1482                  hdrs += 2;
  1483          }
  1484          if (skb_is_gso(skb)) {
  1485                  tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
  1486                  tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
  1487                  hdrs += 2;
  1488          }
  1489          /* determine if l2/3/4 headers are sent to firmware */
  1490          if ((*hdrs >> 7) & 1) {
  1491                  build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
  1492                  tx_crq.v1.n_crq_elem = num_entries;
  1493                  tx_buff->indir_arr[0] = tx_crq;
  1494                  tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
  1495                                                      sizeof(tx_buff->indir_arr),
  1496                                                      DMA_TO_DEVICE);
  1497                  if (dma_mapping_error(dev, tx_buff->indir_dma)) {
  1498                          dev_kfree_skb_any(skb);
  1499                          tx_buff->skb = NULL;
  1500                          if (!firmware_has_feature(FW_FEATURE_CMO))
  1501                                  dev_err(dev, "tx: unable to map descriptor array\n");
  1502                          tx_map_failed++;
  1503                          tx_dropped++;
  1504                          ret = NETDEV_TX_OK;
  1505                          goto out;
  1506                  }
  1507                  lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
  1508                                                 (u64)tx_buff->indir_dma,
  1509                                                 (u64)num_entries);
  1510          } else {
  1511                  lpar_rc = send_subcrq(adapter, handle_array[queue_num],
  1512                                        &tx_crq);
  1513          }
  1514          if (lpar_rc != H_SUCCESS) {
  1515                  dev_err(dev, "tx failed with code %ld\n", lpar_rc);
  1516  
  1517                  if (tx_pool->consumer_index == 0)
  1518                          tx_pool->consumer_index =
  1519                                  adapter->req_tx_entries_per_subcrq - 1;
  1520                  else
  1521                          tx_pool->consumer_index--;
  1522  
  1523                  dev_kfree_skb_any(skb);
  1524                  tx_buff->skb = NULL;
  1525  
  1526                  if (lpar_rc == H_CLOSED) {
  1527                          /* Disable TX and report carrier off if queue is closed.
  1528                           * Firmware guarantees that a signal will be sent to the
  1529                           * driver, triggering a reset or some other action.
  1530                           */
  1531                          netif_tx_stop_all_queues(netdev);
  1532                          netif_carrier_off(netdev);
  1533                  }
  1534  
  1535                  tx_send_failed++;
  1536                  tx_dropped++;
  1537                  ret = NETDEV_TX_OK;
  1538                  goto out;
  1539          }
  1540  
  1541          if (atomic_inc_return(&tx_scrq->used)
  1542                                          >= adapter->req_tx_entries_per_subcrq) {
  1543                  netdev_info(netdev, "Stopping queue %d\n", queue_num);
  1544                  netif_stop_subqueue(netdev, queue_num);
  1545          }
  1546  
  1547          tx_packets++;
  1548          tx_bytes += skb->len;
  1549          txq->trans_start = jiffies;
  1550          ret = NETDEV_TX_OK;
  1551  
  1552  out:
  1553          netdev->stats.tx_dropped += tx_dropped;
  1554          netdev->stats.tx_bytes += tx_bytes;
  1555          netdev->stats.tx_packets += tx_packets;
  1556          adapter->tx_send_failed += tx_send_failed;
  1557          adapter->tx_map_failed += tx_map_failed;
  1558          adapter->tx_stats_buffers[queue_num].packets += tx_packets;
  1559          adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
  1560          adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
  1561  
  1562          return ret;
  1563  }
  1564  

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

