[PATCH v6 net-next,mips 4/7] staging: octeon: Remove USE_ASYNC_IOBDMA macro.

2017-12-07, David Daney
The previous patch sets USE_ASYNC_IOBDMA to 1 unconditionally.  Remove
USE_ASYNC_IOBDMA from all if statements, and remove the dead code caused
by the change.

Acked-by: Greg Kroah-Hartman 
Signed-off-by: David Daney 
---
 drivers/staging/octeon/ethernet-defines.h |  6 ---
 drivers/staging/octeon/ethernet-rx.c  | 25 -
 drivers/staging/octeon/ethernet-tx.c  | 85 ++-
 3 files changed, 37 insertions(+), 79 deletions(-)
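
For illustration only, a minimal sketch of the cleanup pattern applied
throughout this patch.  The helpers below (fake_scratch, scratch_read64)
are hypothetical stand-ins, not the driver's cvmx_* API: once the macro is
hard-wired to 1, each if (USE_ASYNC_IOBDMA) guard is always true and any
synchronous fallback is dead, so only the unconditional body is kept.

    #include <stdint.h>

    #define USE_ASYNC_IOBDMA 1              /* always 1 before this patch removes it */

    static uint64_t fake_scratch;           /* stand-in for the CVMSEG scratch word */

    static uint64_t scratch_read64(void)
    {
            return fake_scratch;
    }

    /* Before: the guard can never be false, so it only obscures the code. */
    static uint64_t save_scratch_before(void)
    {
            uint64_t old = 0;

            if (USE_ASYNC_IOBDMA)
                    old = scratch_read64();
            return old;
    }

    /* After: the guard is removed and the body runs unconditionally. */
    static uint64_t save_scratch_after(void)
    {
            return scratch_read64();
    }

    int main(void)
    {
            fake_scratch = 42;
            return save_scratch_before() == save_scratch_after() ? 0 : 1;
    }

The same transformation is repeated in both the RX and TX paths in the
diff below.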

diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index e898df25b87f..21438c804a43 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -10,10 +10,6 @@
 
 /*
  * A few defines are used to control the operation of this driver:
- *  USE_ASYNC_IOBDMA
- *  Use asynchronous IO access to hardware. This uses Octeon's asynchronous
- *  IOBDMAs to issue IO accesses without stalling. Set this to zero
- *  to disable this. Note that IOBDMAs require CVMSEG.
  *  REUSE_SKBUFFS_WITHOUT_FREE
  *  Allows the TX path to free an skbuff into the FPA hardware pool. This
  *  can significantly improve performance for forwarding and bridging, but
@@ -32,8 +28,6 @@
 #define REUSE_SKBUFFS_WITHOUT_FREE  1
 #endif
 
-#define USE_ASYNC_IOBDMA   1
-
 /* Maximum number of SKBs to try to free per xmit packet. */
 #define MAX_OUT_QUEUE_DEPTH 1000
 
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1a44291318ee..dd76c99d5ae0 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -201,11 +201,9 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
/* Prefetch cvm_oct_device since we know we need it soon */
prefetch(cvm_oct_device);
 
-   if (USE_ASYNC_IOBDMA) {
-   /* Save scratch in case userspace is using it */
-   CVMX_SYNCIOBDMA;
-   old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-   }
+   /* Save scratch in case userspace is using it */
+   CVMX_SYNCIOBDMA;
+   old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
 
/* Only allow work for our group (and preserve priorities) */
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
@@ -220,10 +218,8 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
   BIT(rx_group->group));
}
 
-   if (USE_ASYNC_IOBDMA) {
-   cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
-   did_work_request = 1;
-   }
+   cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+   did_work_request = 1;
 
while (rx_count < budget) {
struct sk_buff *skb = NULL;
@@ -232,7 +228,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
cvmx_wqe_t *work;
int port;
 
-   if (USE_ASYNC_IOBDMA && did_work_request)
+   if (did_work_request)
work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
else
work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
@@ -260,7 +256,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
sizeof(void *));
prefetch(pskb);
 
-   if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
+   if (rx_count < (budget - 1)) {
cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
CVMX_POW_NO_WAIT);
did_work_request = 1;
@@ -403,10 +399,9 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
}
 
-   if (USE_ASYNC_IOBDMA) {
-   /* Restore the scratch area */
-   cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
-   }
+   /* Restore the scratch area */
+   cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+
cvm_oct_rx_refill_pool(0);
 
return rx_count;
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 31f35025d19e..2eede0907924 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -179,23 +179,18 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
qos = 0;
}
 
-   if (USE_ASYNC_IOBDMA) {
-   /* Save scratch in case userspace is using it */
-   CVMX_SYNCIOBDMA;
-   old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
-   old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-
-   /*
-* Fetch and increment the number of packets to be
-* freed.
-*/
-  
