[PATCH 3/4] async_tx: replace 'int_en' with operation preparation flags

Date: 2007-12-21
From: Dan Williams
Pass a full set of flags to drivers' per-operation 'prep' routines.
Currently the only flag passed is DMA_PREP_INTERRUPT.  The expectation is
that arch-specific async_tx_find_channel() implementations can exploit this
capability to find the best channel for an operation.
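
For reference, the shape of the new interface (the include/linux/dmaengine.h
hunk is truncated in this archive, so the snippet below is a sketch rather
than a verbatim quote): the boolean 'int_en' parameter becomes an
'unsigned long flags' bitmask, and DMA_PREP_INTERRUPT is its first defined
flag.

	/* flags to augment operation preparation;
	 * DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon
	 * completion of this transaction; replaces the old 'int_en'
	 * argument
	 */
	enum dma_ctrl_flags {
		DMA_PREP_INTERRUPT = (1 << 0),
	};

	/* prep routines take the flags bitmask in place of 'int int_en',
	 * e.g. the memcpy hook in struct dma_device:
	 */
	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);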

Signed-off-by: Dan Williams <[EMAIL PROTECTED]>
---

 crypto/async_tx/async_memcpy.c         |    3 ++-
 crypto/async_tx/async_memset.c         |    3 ++-
 crypto/async_tx/async_xor.c            |   10 ++
 drivers/dma/ioat_dma.c                 |    4 ++--
 drivers/dma/iop-adma.c                 |   20 ++--
 include/asm-arm/arch-iop13xx/adma.h    |   18 ++
 include/asm-arm/hardware/iop3xx-adma.h |   30 +-
 include/linux/dmaengine.h              |   17 +
 8 files changed, 62 insertions(+), 43 deletions(-)

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index faca0bc..25dcf33 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -52,6 +52,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
if (device) {
dma_addr_t dma_dest, dma_src;
+   unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
@@ -60,7 +61,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
			       DMA_TO_DEVICE);
 
tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
-   len, cb_fn != NULL);
+   len, dma_prep_flags);
}
 
if (tx) {
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 0c94851..8e98ab0 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -52,12 +52,13 @@ async_memset(struct page *dest, int val, unsigned int offset,
 
if (device) {
dma_addr_t dma_dest;
+   unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
dma_dest = dma_map_page(device->dev, dest, offset, len,
DMA_FROM_DEVICE);
 
tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
-   cb_fn != NULL);
+   dma_prep_flags);
}
 
if (tx) {
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index fbf113a..80f30bc 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -41,6 +41,7 @@ do_async_xor(struct dma_device *device,
dma_addr_t *dma_src = (dma_addr_t *) src_list;
struct dma_async_tx_descriptor *tx;
int i;
+   unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
@@ -56,7 +57,7 @@ do_async_xor(struct dma_device *device,
 * in case they can not provide a descriptor
 */
tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
-cb_fn != NULL);
+dma_prep_flags);
if (!tx) {
if (depend_tx)
dma_wait_for_async_tx(depend_tx);
@@ -64,7 +65,7 @@ do_async_xor(struct dma_device *device,
while (!tx)
tx = device->device_prep_dma_xor(chan, dma_dest,
 dma_src, src_cnt, len,
-cb_fn != NULL);
+dma_prep_flags);
}
 
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -264,6 +265,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
if (device) {
dma_addr_t *dma_src = (dma_addr_t *) src_list;
+   unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
int i;
 
pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
@@ -274,7 +276,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
  len, result,
- cb_fn != NULL);
+ dma_prep_flags);
if (!tx) {
if (depend_tx)
dma_wait_for_async_tx(depend_tx);
@@ -282,7 +284,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
		while (!tx)
			tx = device->device_prep_dma_zero_sum(chan,
				dma_src, src_cnt, len, result,
-				cb_fn != NULL);
+				dma_prep_flags);
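
On the driver side, a prep routine now tests the flag bit where it
previously tested 'int_en'. A minimal hypothetical sketch (the ioat_dma.c
and iop-adma.c hunks are truncated in this archive; 'example_desc' and
'EXAMPLE_CTRL_INT_EN' are made-up names, not from the patch):

	static void example_desc_set_int(struct example_desc *desc,
					 unsigned long flags)
	{
		/* arm the completion interrupt only when requested */
		if (flags & DMA_PREP_INTERRUPT)
			desc->ctrl |= EXAMPLE_CTRL_INT_EN;
	}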
