The previous patch sets USE_ASYNC_IOBDMA to 1 unconditionally.  Remove
USE_ASYNC_IOBDMA from all if statements, and remove the dead code left
behind by that change.

Acked-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
Signed-off-by: David Daney <david.da...@cavium.com>
---
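Note: with USE_ASYNC_IOBDMA hard-wired to 1, every guarded block collapses
the same way; a minimal sketch of the pattern (identifiers here are
illustrative only, not taken from the driver):

	/* before: both arms kept, one selected by the compile-time constant */
	if (USE_ASYNC_IOBDMA) {
		do_async_io();	/* IOBDMA path, requires CVMSEG */
	} else {
		do_sync_io();	/* synchronous fallback, now unreachable */
	}

	/* after: the always-true test and the dead else arm are removed */
	do_async_io();
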
 drivers/staging/octeon/ethernet-defines.h |  6 ---
 drivers/staging/octeon/ethernet-rx.c      | 25 ++++-----
 drivers/staging/octeon/ethernet-tx.c      | 85 ++++++++++---------------------
 3 files changed, 37 insertions(+), 79 deletions(-)

diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 33c71f86890b..15db928c4712 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -7,10 +7,6 @@
 
 /*
  * A few defines are used to control the operation of this driver:
- *  USE_ASYNC_IOBDMA
- *      Use asynchronous IO access to hardware. This uses Octeon's asynchronous
- *      IOBDMAs to issue IO accesses without stalling. Set this to zero
- *      to disable this. Note that IOBDMAs require CVMSEG.
  *  REUSE_SKBUFFS_WITHOUT_FREE
  *      Allows the TX path to free an skbuff into the FPA hardware pool. This
  *      can significantly improve performance for forwarding and bridging, but
@@ -29,8 +25,6 @@
 #define REUSE_SKBUFFS_WITHOUT_FREE  1
 #endif
 
-#define USE_ASYNC_IOBDMA       1
-
 /* Maximum number of SKBs to try to free per xmit packet. */
 #define MAX_OUT_QUEUE_DEPTH 1000
 
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 5e271245273c..c1ae60ce11f5 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -198,11 +198,9 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
        /* Prefetch cvm_oct_device since we know we need it soon */
        prefetch(cvm_oct_device);
 
-       if (USE_ASYNC_IOBDMA) {
-               /* Save scratch in case userspace is using it */
-               CVMX_SYNCIOBDMA;
-               old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-       }
+       /* Save scratch in case userspace is using it */
+       CVMX_SYNCIOBDMA;
+       old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
 
        /* Only allow work for our group (and preserve priorities) */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
@@ -217,10 +215,8 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
                               BIT(rx_group->group));
        }
 
-       if (USE_ASYNC_IOBDMA) {
-               cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
-               did_work_request = 1;
-       }
+       cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+       did_work_request = 1;
 
        while (rx_count < budget) {
                struct sk_buff *skb = NULL;
@@ -229,7 +225,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
                cvmx_wqe_t *work;
                int port;
 
-               if (USE_ASYNC_IOBDMA && did_work_request)
+               if (did_work_request)
                        work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
                else
                        work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
@@ -257,7 +253,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
                        sizeof(void *));
                prefetch(pskb);
 
-               if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
+               if (rx_count < (budget - 1)) {
                        cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
                                                            CVMX_POW_NO_WAIT);
                        did_work_request = 1;
@@ -400,10 +396,9 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
                cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
        }
 
-       if (USE_ASYNC_IOBDMA) {
-               /* Restore the scratch area */
-               cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
-       }
+       /* Restore the scratch area */
+       cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+
        cvm_oct_rx_refill_pool(0);
 
        return rx_count;
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index df3441b815bb..2aa5fcb7ee32 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -176,23 +176,18 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                qos = 0;
        }
 
-       if (USE_ASYNC_IOBDMA) {
-               /* Save scratch in case userspace is using it */
-               CVMX_SYNCIOBDMA;
-               old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-               old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-
-               /*
-                * Fetch and increment the number of packets to be
-                * freed.
-                */
-               cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
-                                              FAU_NUM_PACKET_BUFFERS_TO_FREE,
-                                              0);
-               cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
-                                              priv->fau + qos * 4,
-                                              MAX_SKB_TO_FREE);
-       }
+       /* Save scratch in case userspace is using it */
+       CVMX_SYNCIOBDMA;
+       old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+       old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+
+       /* Fetch and increment the number of packets to be freed. */
+       cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
+                                      FAU_NUM_PACKET_BUFFERS_TO_FREE,
+                                      0);
+       cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+                                      priv->fau + qos * 4,
+                                      MAX_SKB_TO_FREE);
 
        /*
         * We have space for 6 segment pointers, If there will be more
@@ -201,22 +196,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
                if (unlikely(__skb_linearize(skb))) {
                        queue_type = QUEUE_DROP;
-                       if (USE_ASYNC_IOBDMA) {
-                               /*
-                                * Get the number of skbuffs in use
-                                * by the hardware
-                                */
-                               CVMX_SYNCIOBDMA;
-                               skb_to_free =
-                                       cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-                       } else {
-                               /*
-                                * Get the number of skbuffs in use
-                                * by the hardware
-                                */
-                               skb_to_free = cvmx_fau_fetch_and_add32(
-                                       priv->fau + qos * 4, MAX_SKB_TO_FREE);
-                       }
+                       /* Get the number of skbuffs in use by the
+                        * hardware
+                        */
+                       CVMX_SYNCIOBDMA;
+                       skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
                        skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                                 priv->fau +
                                                                 qos * 4);
@@ -384,18 +368,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
        }
 
-       if (USE_ASYNC_IOBDMA) {
-               /* Get the number of skbuffs in use by the hardware */
-               CVMX_SYNCIOBDMA;
-               skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-               buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-       } else {
-               /* Get the number of skbuffs in use by the hardware */
-               skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
-                                                      MAX_SKB_TO_FREE);
-               buffers_to_free =
-                   cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
-       }
+       /* Get the number of skbuffs in use by the hardware */
+       CVMX_SYNCIOBDMA;
+       skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+       buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
 
        skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
                                                 priv->fau + qos * 4);
@@ -413,9 +389,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                queue_type = QUEUE_HW;
        }
-       if (USE_ASYNC_IOBDMA)
-               cvmx_fau_async_fetch_and_add32(
-                               CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+       cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
 
        spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
 
@@ -485,16 +459,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                dev_kfree_skb_any(t);
        }
 
-       if (USE_ASYNC_IOBDMA) {
-               CVMX_SYNCIOBDMA;
-               total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-               /* Restore the scratch area */
-               cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
-               cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
-       } else {
-               total_to_clean = cvmx_fau_fetch_and_add32(
-                                               FAU_TOTAL_TX_TO_CLEAN, 1);
-       }
+       CVMX_SYNCIOBDMA;
+       total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+       /* Restore the scratch area */
+       cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+       cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
 
        if (total_to_clean & 0x3ff) {
                /*
-- 
2.14.3
